From 25a72113b122f984c904b24c4af23a1cba1eff45 Mon Sep 17 00:00:00 2001 From: Wim Date: Sun, 4 Mar 2018 23:46:13 +0100 Subject: [PATCH] Add vendor files for spf13/viper --- vendor/github.com/armon/consul-api/LICENSE | 362 + vendor/github.com/armon/consul-api/acl.go | 140 + vendor/github.com/armon/consul-api/agent.go | 272 + vendor/github.com/armon/consul-api/api.go | 323 + vendor/github.com/armon/consul-api/catalog.go | 181 + vendor/github.com/armon/consul-api/event.go | 104 + vendor/github.com/armon/consul-api/health.go | 136 + vendor/github.com/armon/consul-api/kv.go | 219 + vendor/github.com/armon/consul-api/session.go | 204 + vendor/github.com/armon/consul-api/status.go | 43 + vendor/github.com/coreos/etcd/client/LICENSE | 202 + .../coreos/etcd/client/auth_role.go | 236 + .../coreos/etcd/client/auth_user.go | 319 + .../coreos/etcd/client/cancelreq.go | 18 + .../github.com/coreos/etcd/client/client.go | 710 + .../coreos/etcd/client/cluster_error.go | 37 + vendor/github.com/coreos/etcd/client/curl.go | 70 + .../github.com/coreos/etcd/client/discover.go | 40 + vendor/github.com/coreos/etcd/client/doc.go | 73 + .../coreos/etcd/client/integration/doc.go | 17 + .../coreos/etcd/client/keys.generated.go | 5218 +++ vendor/github.com/coreos/etcd/client/keys.go | 681 + .../github.com/coreos/etcd/client/members.go | 303 + vendor/github.com/coreos/etcd/client/util.go | 53 + .../coreos/etcd/pkg/pathutil/LICENSE | 202 + .../coreos/etcd/pkg/pathutil/path.go | 31 + vendor/github.com/coreos/etcd/pkg/srv/LICENSE | 202 + vendor/github.com/coreos/etcd/pkg/srv/srv.go | 130 + .../github.com/coreos/etcd/pkg/types/LICENSE | 202 + .../github.com/coreos/etcd/pkg/types/doc.go | 17 + vendor/github.com/coreos/etcd/pkg/types/id.go | 41 + .../github.com/coreos/etcd/pkg/types/set.go | 178 + .../github.com/coreos/etcd/pkg/types/slice.go | 22 + .../github.com/coreos/etcd/pkg/types/urls.go | 82 + .../coreos/etcd/pkg/types/urlsmap.go | 107 + .../coreos/go-semver/semver/LICENSE | 202 + .../coreos/go-semver/semver/semver.go | 268 + .../coreos/go-semver/semver/sort.go | 38 + .../vendor/github.com/ugorji/go/codec/0doc.go | 185 + .../vendor/github.com/ugorji/go/codec/LICENSE | 202 + .../vendor/github.com/ugorji/go/codec/binc.go | 946 + .../vendor/github.com/ugorji/go/codec/cbor.go | 631 + .../github.com/ugorji/go/codec/decode.go | 2520 ++ .../github.com/ugorji/go/codec/encode.go | 1414 + .../ugorji/go/codec/fast-path.generated.go | 33034 ++++++++++++++++ .../ugorji/go/codec/fast-path.not.go | 35 + .../ugorji/go/codec/gen-helper.generated.go | 250 + .../ugorji/go/codec/gen.generated.go | 132 + .../vendor/github.com/ugorji/go/codec/gen.go | 2014 + .../go/codec/goversion_arrayof_gte_go15.go | 14 + .../go/codec/goversion_arrayof_lt_go15.go | 14 + .../go/codec/goversion_makemap_gte_go19.go | 15 + .../go/codec/goversion_makemap_lt_go19.go | 12 + .../go/codec/goversion_unsupported_lt_go14.go | 17 + .../go/codec/goversion_vendor_eq_go15.go | 10 + .../go/codec/goversion_vendor_eq_go16.go | 10 + .../go/codec/goversion_vendor_gte_go17.go | 8 + .../go/codec/goversion_vendor_lt_go15.go | 8 + .../github.com/ugorji/go/codec/helper.go | 1944 + .../ugorji/go/codec/helper_internal.go | 221 + .../ugorji/go/codec/helper_not_unsafe.go | 156 + .../ugorji/go/codec/helper_unsafe.go | 418 + .../vendor/github.com/ugorji/go/codec/json.go | 1172 + .../github.com/ugorji/go/codec/msgpack.go | 899 + .../vendor/github.com/ugorji/go/codec/noop.go | 214 + .../vendor/github.com/ugorji/go/codec/rpc.go | 187 + .../github.com/ugorji/go/codec/simple.go | 541 + 
.../vendor/github.com/ugorji/go/codec/time.go | 220 + .../vendor/github.com/ugorji/go/codec/xml.go | 426 + .../vendor/github.com/ugorji/go/codec/z.go | 23 + vendor/github.com/coreos/etcd/version/LICENSE | 202 + .../github.com/coreos/etcd/version/version.go | 56 + vendor/github.com/fsnotify/fsnotify/LICENSE | 28 + vendor/github.com/fsnotify/fsnotify/fen.go | 37 + .../github.com/fsnotify/fsnotify/fsnotify.go | 66 + .../github.com/fsnotify/fsnotify/inotify.go | 337 + .../fsnotify/fsnotify/inotify_poller.go | 187 + vendor/github.com/fsnotify/fsnotify/kqueue.go | 521 + .../fsnotify/fsnotify/open_mode_bsd.go | 11 + .../fsnotify/fsnotify/open_mode_darwin.go | 12 + .../github.com/fsnotify/fsnotify/windows.go | 561 + vendor/github.com/hashicorp/hcl/LICENSE | 354 + vendor/github.com/hashicorp/hcl/decoder.go | 729 + vendor/github.com/hashicorp/hcl/hcl.go | 11 + .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 + .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 + .../hashicorp/hcl/hcl/fmtcmd/fmtcmd.go | 162 + .../hashicorp/hcl/hcl/parser/error.go | 17 + .../hashicorp/hcl/hcl/parser/parser.go | 526 + .../hashicorp/hcl/hcl/printer/nodes.go | 779 + .../hashicorp/hcl/hcl/printer/printer.go | 66 + .../hashicorp/hcl/hcl/scanner/scanner.go | 651 + .../hashicorp/hcl/hcl/strconv/quote.go | 241 + .../hashicorp/hcl/hcl/token/position.go | 46 + .../hashicorp/hcl/hcl/token/token.go | 219 + .../hashicorp/hcl/json/parser/flatten.go | 117 + .../hashicorp/hcl/json/parser/parser.go | 313 + .../hashicorp/hcl/json/scanner/scanner.go | 451 + .../hashicorp/hcl/json/token/position.go | 46 + .../hashicorp/hcl/json/token/token.go | 118 + vendor/github.com/hashicorp/hcl/lex.go | 38 + vendor/github.com/hashicorp/hcl/parse.go | 39 + .../hashicorp/hcl/testhelper/unix2dos.go | 15 + vendor/github.com/kr/fs/LICENSE | 27 + vendor/github.com/kr/fs/filesystem.go | 36 + vendor/github.com/kr/fs/walk.go | 95 + .../github.com/magiconair/properties/LICENSE | 25 + .../magiconair/properties/assert/assert.go | 90 + .../magiconair/properties/decode.go | 289 + .../github.com/magiconair/properties/doc.go | 156 + .../magiconair/properties/integrate.go | 34 + .../github.com/magiconair/properties/lex.go | 407 + .../github.com/magiconair/properties/load.go | 241 + .../magiconair/properties/parser.go | 95 + .../magiconair/properties/properties.go | 822 + .../magiconair/properties/rangecheck.go | 31 + .../github.com/mitchellh/mapstructure/LICENSE | 21 + .../mitchellh/mapstructure/decode_hooks.go | 171 + .../mitchellh/mapstructure/error.go | 50 + .../mitchellh/mapstructure/mapstructure.go | 1043 + vendor/github.com/pelletier/go-toml/LICENSE | 21 + .../pelletier/go-toml/cmd/test_program.go | 91 + .../pelletier/go-toml/cmd/tomljson/main.go | 72 + .../pelletier/go-toml/cmd/tomll/main.go | 66 + vendor/github.com/pelletier/go-toml/doc.go | 23 + vendor/github.com/pelletier/go-toml/fuzz.go | 31 + .../pelletier/go-toml/keysparsing.go | 85 + vendor/github.com/pelletier/go-toml/lexer.go | 750 + .../github.com/pelletier/go-toml/marshal.go | 600 + vendor/github.com/pelletier/go-toml/parser.go | 430 + .../github.com/pelletier/go-toml/position.go | 29 + .../github.com/pelletier/go-toml/query/doc.go | 175 + .../pelletier/go-toml/query/lexer.go | 357 + .../pelletier/go-toml/query/match.go | 232 + .../pelletier/go-toml/query/parser.go | 275 + .../pelletier/go-toml/query/query.go | 158 + .../pelletier/go-toml/query/tokens.go | 106 + vendor/github.com/pelletier/go-toml/token.go | 144 + vendor/github.com/pelletier/go-toml/toml.go | 309 + 
.../pelletier/go-toml/tomltree_create.go | 142 + .../pelletier/go-toml/tomltree_write.go | 287 + vendor/github.com/pkg/errors/LICENSE | 23 + vendor/github.com/pkg/errors/errors.go | 269 + vendor/github.com/pkg/errors/stack.go | 147 + vendor/github.com/pkg/sftp/LICENSE | 9 + vendor/github.com/pkg/sftp/attrs.go | 241 + vendor/github.com/pkg/sftp/attrs_stubs.go | 11 + vendor/github.com/pkg/sftp/attrs_unix.go | 17 + vendor/github.com/pkg/sftp/client.go | 1174 + vendor/github.com/pkg/sftp/conn.go | 133 + vendor/github.com/pkg/sftp/debug.go | 9 + .../examples/buffered-read-benchmark/main.go | 78 + .../examples/buffered-write-benchmark/main.go | 84 + .../pkg/sftp/examples/request-server/main.go | 131 + .../pkg/sftp/examples/sftp-server/main.go | 147 + .../examples/streaming-read-benchmark/main.go | 85 + .../streaming-write-benchmark/main.go | 85 + vendor/github.com/pkg/sftp/match.go | 295 + vendor/github.com/pkg/sftp/packet-manager.go | 173 + vendor/github.com/pkg/sftp/packet-typing.go | 133 + vendor/github.com/pkg/sftp/packet.go | 952 + vendor/github.com/pkg/sftp/release.go | 5 + vendor/github.com/pkg/sftp/request-attrs.go | 63 + vendor/github.com/pkg/sftp/request-errors.go | 42 + vendor/github.com/pkg/sftp/request-example.go | 261 + .../github.com/pkg/sftp/request-interfaces.go | 46 + vendor/github.com/pkg/sftp/request-server.go | 238 + vendor/github.com/pkg/sftp/request-unix.go | 23 + vendor/github.com/pkg/sftp/request.go | 353 + vendor/github.com/pkg/sftp/request_windows.go | 11 + vendor/github.com/pkg/sftp/server.go | 675 + .../pkg/sftp/server_standalone/main.go | 52 + .../pkg/sftp/server_statvfs_darwin.go | 21 + .../pkg/sftp/server_statvfs_impl.go | 25 + .../pkg/sftp/server_statvfs_linux.go | 22 + .../pkg/sftp/server_statvfs_stubs.go | 11 + vendor/github.com/pkg/sftp/server_stubs.go | 32 + vendor/github.com/pkg/sftp/server_unix.go | 54 + vendor/github.com/pkg/sftp/sftp.go | 217 + vendor/github.com/spf13/afero/LICENSE.txt | 174 + vendor/github.com/spf13/afero/afero.go | 108 + vendor/github.com/spf13/afero/basepath.go | 145 + .../github.com/spf13/afero/cacheOnReadFs.go | 290 + vendor/github.com/spf13/afero/const_bsds.go | 22 + .../github.com/spf13/afero/const_win_unix.go | 25 + .../github.com/spf13/afero/copyOnWriteFs.go | 253 + vendor/github.com/spf13/afero/httpFs.go | 110 + vendor/github.com/spf13/afero/ioutil.go | 230 + vendor/github.com/spf13/afero/match.go | 110 + vendor/github.com/spf13/afero/mem/dir.go | 37 + vendor/github.com/spf13/afero/mem/dirmap.go | 43 + vendor/github.com/spf13/afero/mem/file.go | 314 + vendor/github.com/spf13/afero/memmap.go | 365 + vendor/github.com/spf13/afero/os.go | 94 + vendor/github.com/spf13/afero/path.go | 108 + vendor/github.com/spf13/afero/readonlyfs.go | 70 + vendor/github.com/spf13/afero/regexpfs.go | 214 + vendor/github.com/spf13/afero/sftpfs/file.go | 95 + vendor/github.com/spf13/afero/sftpfs/sftp.go | 129 + vendor/github.com/spf13/afero/unionFile.go | 274 + vendor/github.com/spf13/afero/util.go | 330 + vendor/github.com/spf13/cast/LICENSE | 21 + vendor/github.com/spf13/cast/cast.go | 159 + vendor/github.com/spf13/cast/caste.go | 1166 + .../spf13/jwalterweatherman/LICENSE | 21 + .../jwalterweatherman/default_notepad.go | 113 + .../spf13/jwalterweatherman/log_counter.go | 55 + .../spf13/jwalterweatherman/notepad.go | 194 + vendor/github.com/spf13/pflag/LICENSE | 28 + vendor/github.com/spf13/pflag/bool.go | 94 + vendor/github.com/spf13/pflag/bool_slice.go | 147 + vendor/github.com/spf13/pflag/count.go | 96 + vendor/github.com/spf13/pflag/duration.go | 
86 + .../github.com/spf13/pflag/duration_slice.go | 128 + vendor/github.com/spf13/pflag/flag.go | 1157 + vendor/github.com/spf13/pflag/float32.go | 88 + vendor/github.com/spf13/pflag/float64.go | 84 + vendor/github.com/spf13/pflag/golangflag.go | 101 + vendor/github.com/spf13/pflag/int.go | 84 + vendor/github.com/spf13/pflag/int16.go | 88 + vendor/github.com/spf13/pflag/int32.go | 88 + vendor/github.com/spf13/pflag/int64.go | 84 + vendor/github.com/spf13/pflag/int8.go | 88 + vendor/github.com/spf13/pflag/int_slice.go | 128 + vendor/github.com/spf13/pflag/ip.go | 94 + vendor/github.com/spf13/pflag/ip_slice.go | 148 + vendor/github.com/spf13/pflag/ipmask.go | 122 + vendor/github.com/spf13/pflag/ipnet.go | 98 + vendor/github.com/spf13/pflag/string.go | 80 + vendor/github.com/spf13/pflag/string_array.go | 103 + vendor/github.com/spf13/pflag/string_slice.go | 149 + vendor/github.com/spf13/pflag/uint.go | 88 + vendor/github.com/spf13/pflag/uint16.go | 88 + vendor/github.com/spf13/pflag/uint32.go | 88 + vendor/github.com/spf13/pflag/uint64.go | 88 + vendor/github.com/spf13/pflag/uint8.go | 88 + vendor/github.com/spf13/pflag/uint_slice.go | 126 + vendor/github.com/spf13/viper/LICENSE | 21 + vendor/github.com/spf13/viper/flags.go | 57 + .../github.com/spf13/viper/remote/remote.go | 105 + vendor/github.com/spf13/viper/util.go | 221 + vendor/github.com/spf13/viper/viper.go | 1775 + .../xordataexchange/crypt/backend/LICENSE | 9 + .../xordataexchange/crypt/backend/backend.go | 32 + .../crypt/backend/consul/consul.go | 87 + .../crypt/backend/etcd/etcd.go | 116 + .../crypt/backend/mock/mock.go | 61 + .../xordataexchange/crypt/config/LICENSE | 9 + .../xordataexchange/crypt/config/config.go | 201 + .../crypt/encoding/secconf/LICENSE | 9 + .../crypt/encoding/secconf/secconf.go | 68 + vendor/golang.org/x/crypto/cast5/LICENSE | 27 + vendor/golang.org/x/crypto/cast5/cast5.go | 526 + vendor/golang.org/x/crypto/openpgp/LICENSE | 27 + .../x/crypto/openpgp/armor/armor.go | 219 + .../x/crypto/openpgp/armor/encode.go | 160 + .../x/crypto/openpgp/canonical_text.go | 59 + .../x/crypto/openpgp/clearsign/clearsign.go | 376 + .../x/crypto/openpgp/elgamal/elgamal.go | 122 + .../x/crypto/openpgp/errors/errors.go | 72 + vendor/golang.org/x/crypto/openpgp/keys.go | 641 + .../x/crypto/openpgp/packet/compressed.go | 123 + .../x/crypto/openpgp/packet/config.go | 91 + .../x/crypto/openpgp/packet/encrypted_key.go | 199 + .../x/crypto/openpgp/packet/literal.go | 89 + .../x/crypto/openpgp/packet/ocfb.go | 143 + .../openpgp/packet/one_pass_signature.go | 73 + .../x/crypto/openpgp/packet/opaque.go | 162 + .../x/crypto/openpgp/packet/packet.go | 537 + .../x/crypto/openpgp/packet/private_key.go | 380 + .../x/crypto/openpgp/packet/public_key.go | 748 + .../x/crypto/openpgp/packet/public_key_v3.go | 279 + .../x/crypto/openpgp/packet/reader.go | 76 + .../x/crypto/openpgp/packet/signature.go | 731 + .../x/crypto/openpgp/packet/signature_v3.go | 146 + .../openpgp/packet/symmetric_key_encrypted.go | 155 + .../openpgp/packet/symmetrically_encrypted.go | 290 + .../x/crypto/openpgp/packet/userattribute.go | 91 + .../x/crypto/openpgp/packet/userid.go | 160 + vendor/golang.org/x/crypto/openpgp/read.go | 442 + vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 273 + vendor/golang.org/x/crypto/openpgp/write.go | 378 + vendor/golang.org/x/text/internal/gen/LICENSE | 27 + .../x/text/internal/gen/bitfield/bitfield.go | 226 + vendor/golang.org/x/text/internal/gen/code.go | 371 + vendor/golang.org/x/text/internal/gen/gen.go | 333 + 
.../x/text/internal/triegen/LICENSE | 27 + .../x/text/internal/triegen/compact.go | 58 + .../x/text/internal/triegen/print.go | 251 + .../x/text/internal/triegen/triegen.go | 494 + vendor/golang.org/x/text/internal/ucd/LICENSE | 27 + vendor/golang.org/x/text/internal/ucd/ucd.go | 371 + vendor/golang.org/x/text/unicode/cldr/LICENSE | 27 + vendor/golang.org/x/text/unicode/cldr/base.go | 105 + vendor/golang.org/x/text/unicode/cldr/cldr.go | 130 + .../golang.org/x/text/unicode/cldr/collate.go | 359 + .../golang.org/x/text/unicode/cldr/decode.go | 171 + .../golang.org/x/text/unicode/cldr/makexml.go | 400 + .../golang.org/x/text/unicode/cldr/resolve.go | 602 + .../golang.org/x/text/unicode/cldr/slice.go | 144 + vendor/golang.org/x/text/unicode/cldr/xml.go | 1494 + vendor/golang.org/x/text/unicode/norm/LICENSE | 27 + .../x/text/unicode/norm/composition.go | 508 + .../x/text/unicode/norm/forminfo.go | 259 + .../golang.org/x/text/unicode/norm/input.go | 109 + vendor/golang.org/x/text/unicode/norm/iter.go | 457 + .../x/text/unicode/norm/maketables.go | 976 + .../x/text/unicode/norm/normalize.go | 609 + .../x/text/unicode/norm/readwriter.go | 125 + .../x/text/unicode/norm/tables10.0.0.go | 7653 ++++ .../x/text/unicode/norm/tables9.0.0.go | 7633 ++++ .../x/text/unicode/norm/transform.go | 88 + vendor/golang.org/x/text/unicode/norm/trie.go | 54 + .../golang.org/x/text/unicode/norm/triegen.go | 117 + vendor/manifest | 274 + 315 files changed, 125435 insertions(+) create mode 100644 vendor/github.com/armon/consul-api/LICENSE create mode 100644 vendor/github.com/armon/consul-api/acl.go create mode 100644 vendor/github.com/armon/consul-api/agent.go create mode 100644 vendor/github.com/armon/consul-api/api.go create mode 100644 vendor/github.com/armon/consul-api/catalog.go create mode 100644 vendor/github.com/armon/consul-api/event.go create mode 100644 vendor/github.com/armon/consul-api/health.go create mode 100644 vendor/github.com/armon/consul-api/kv.go create mode 100644 vendor/github.com/armon/consul-api/session.go create mode 100644 vendor/github.com/armon/consul-api/status.go create mode 100644 vendor/github.com/coreos/etcd/client/LICENSE create mode 100644 vendor/github.com/coreos/etcd/client/auth_role.go create mode 100644 vendor/github.com/coreos/etcd/client/auth_user.go create mode 100644 vendor/github.com/coreos/etcd/client/cancelreq.go create mode 100644 vendor/github.com/coreos/etcd/client/client.go create mode 100644 vendor/github.com/coreos/etcd/client/cluster_error.go create mode 100644 vendor/github.com/coreos/etcd/client/curl.go create mode 100644 vendor/github.com/coreos/etcd/client/discover.go create mode 100644 vendor/github.com/coreos/etcd/client/doc.go create mode 100644 vendor/github.com/coreos/etcd/client/integration/doc.go create mode 100644 vendor/github.com/coreos/etcd/client/keys.generated.go create mode 100644 vendor/github.com/coreos/etcd/client/keys.go create mode 100644 vendor/github.com/coreos/etcd/client/members.go create mode 100644 vendor/github.com/coreos/etcd/client/util.go create mode 100644 vendor/github.com/coreos/etcd/pkg/pathutil/LICENSE create mode 100644 vendor/github.com/coreos/etcd/pkg/pathutil/path.go create mode 100644 vendor/github.com/coreos/etcd/pkg/srv/LICENSE create mode 100644 vendor/github.com/coreos/etcd/pkg/srv/srv.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/LICENSE create mode 100644 vendor/github.com/coreos/etcd/pkg/types/doc.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/id.go create mode 100644 
vendor/github.com/coreos/etcd/pkg/types/set.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/slice.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/urls.go create mode 100644 vendor/github.com/coreos/etcd/pkg/types/urlsmap.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/LICENSE create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/semver.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/sort.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/0doc.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/LICENSE create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/binc.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/cbor.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/decode.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/encode.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.generated.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.not.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen-helper.generated.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.generated.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_internal.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_unsafe.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/json.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/msgpack.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/noop.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/rpc.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/simple.go create mode 100644 
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/time.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/xml.go create mode 100644 vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/z.go create mode 100644 vendor/github.com/coreos/etcd/version/LICENSE create mode 100644 vendor/github.com/coreos/etcd/version/version.go create mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE create mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go create mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go create mode 100644 vendor/github.com/hashicorp/hcl/LICENSE create mode 100644 vendor/github.com/hashicorp/hcl/decoder.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go create mode 100644 vendor/github.com/hashicorp/hcl/lex.go create mode 100644 vendor/github.com/hashicorp/hcl/parse.go create mode 100644 vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go create mode 100644 vendor/github.com/kr/fs/LICENSE create mode 100644 vendor/github.com/kr/fs/filesystem.go create mode 100644 vendor/github.com/kr/fs/walk.go create mode 100644 vendor/github.com/magiconair/properties/LICENSE create mode 100644 vendor/github.com/magiconair/properties/assert/assert.go create mode 100644 vendor/github.com/magiconair/properties/decode.go create mode 100644 vendor/github.com/magiconair/properties/doc.go create mode 100644 vendor/github.com/magiconair/properties/integrate.go create mode 100644 vendor/github.com/magiconair/properties/lex.go create mode 100644 vendor/github.com/magiconair/properties/load.go create mode 100644 vendor/github.com/magiconair/properties/parser.go create mode 100644 vendor/github.com/magiconair/properties/properties.go create mode 100644 vendor/github.com/magiconair/properties/rangecheck.go create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 
vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/pelletier/go-toml/LICENSE create mode 100644 vendor/github.com/pelletier/go-toml/cmd/test_program.go create mode 100644 vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go create mode 100644 vendor/github.com/pelletier/go-toml/cmd/tomll/main.go create mode 100644 vendor/github.com/pelletier/go-toml/doc.go create mode 100644 vendor/github.com/pelletier/go-toml/fuzz.go create mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go create mode 100644 vendor/github.com/pelletier/go-toml/lexer.go create mode 100644 vendor/github.com/pelletier/go-toml/marshal.go create mode 100644 vendor/github.com/pelletier/go-toml/parser.go create mode 100644 vendor/github.com/pelletier/go-toml/position.go create mode 100644 vendor/github.com/pelletier/go-toml/query/doc.go create mode 100644 vendor/github.com/pelletier/go-toml/query/lexer.go create mode 100644 vendor/github.com/pelletier/go-toml/query/match.go create mode 100644 vendor/github.com/pelletier/go-toml/query/parser.go create mode 100644 vendor/github.com/pelletier/go-toml/query/query.go create mode 100644 vendor/github.com/pelletier/go-toml/query/tokens.go create mode 100644 vendor/github.com/pelletier/go-toml/token.go create mode 100644 vendor/github.com/pelletier/go-toml/toml.go create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_create.go create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_write.go create mode 100644 vendor/github.com/pkg/errors/LICENSE create mode 100644 vendor/github.com/pkg/errors/errors.go create mode 100644 vendor/github.com/pkg/errors/stack.go create mode 100644 vendor/github.com/pkg/sftp/LICENSE create mode 100644 vendor/github.com/pkg/sftp/attrs.go create mode 100644 vendor/github.com/pkg/sftp/attrs_stubs.go create mode 100644 vendor/github.com/pkg/sftp/attrs_unix.go create mode 100644 vendor/github.com/pkg/sftp/client.go create mode 100644 vendor/github.com/pkg/sftp/conn.go create mode 100644 vendor/github.com/pkg/sftp/debug.go create mode 100644 vendor/github.com/pkg/sftp/examples/buffered-read-benchmark/main.go create mode 100644 vendor/github.com/pkg/sftp/examples/buffered-write-benchmark/main.go create mode 100644 vendor/github.com/pkg/sftp/examples/request-server/main.go create mode 100644 vendor/github.com/pkg/sftp/examples/sftp-server/main.go create mode 100644 vendor/github.com/pkg/sftp/examples/streaming-read-benchmark/main.go create mode 100644 vendor/github.com/pkg/sftp/examples/streaming-write-benchmark/main.go create mode 100644 vendor/github.com/pkg/sftp/match.go create mode 100644 vendor/github.com/pkg/sftp/packet-manager.go create mode 100644 vendor/github.com/pkg/sftp/packet-typing.go create mode 100644 vendor/github.com/pkg/sftp/packet.go create mode 100644 vendor/github.com/pkg/sftp/release.go create mode 100644 vendor/github.com/pkg/sftp/request-attrs.go create mode 100644 vendor/github.com/pkg/sftp/request-errors.go create mode 100644 vendor/github.com/pkg/sftp/request-example.go create mode 100644 vendor/github.com/pkg/sftp/request-interfaces.go create mode 100644 vendor/github.com/pkg/sftp/request-server.go create mode 100644 vendor/github.com/pkg/sftp/request-unix.go create mode 100644 vendor/github.com/pkg/sftp/request.go create mode 100644 vendor/github.com/pkg/sftp/request_windows.go create mode 100644 vendor/github.com/pkg/sftp/server.go create mode 100644 
vendor/github.com/pkg/sftp/server_standalone/main.go create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_darwin.go create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_impl.go create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_linux.go create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_stubs.go create mode 100644 vendor/github.com/pkg/sftp/server_stubs.go create mode 100644 vendor/github.com/pkg/sftp/server_unix.go create mode 100644 vendor/github.com/pkg/sftp/sftp.go create mode 100644 vendor/github.com/spf13/afero/LICENSE.txt create mode 100644 vendor/github.com/spf13/afero/afero.go create mode 100644 vendor/github.com/spf13/afero/basepath.go create mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go create mode 100644 vendor/github.com/spf13/afero/const_bsds.go create mode 100644 vendor/github.com/spf13/afero/const_win_unix.go create mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go create mode 100644 vendor/github.com/spf13/afero/httpFs.go create mode 100644 vendor/github.com/spf13/afero/ioutil.go create mode 100644 vendor/github.com/spf13/afero/match.go create mode 100644 vendor/github.com/spf13/afero/mem/dir.go create mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go create mode 100644 vendor/github.com/spf13/afero/mem/file.go create mode 100644 vendor/github.com/spf13/afero/memmap.go create mode 100644 vendor/github.com/spf13/afero/os.go create mode 100644 vendor/github.com/spf13/afero/path.go create mode 100644 vendor/github.com/spf13/afero/readonlyfs.go create mode 100644 vendor/github.com/spf13/afero/regexpfs.go create mode 100644 vendor/github.com/spf13/afero/sftpfs/file.go create mode 100644 vendor/github.com/spf13/afero/sftpfs/sftp.go create mode 100644 vendor/github.com/spf13/afero/unionFile.go create mode 100644 vendor/github.com/spf13/afero/util.go create mode 100644 vendor/github.com/spf13/cast/LICENSE create mode 100644 vendor/github.com/spf13/cast/cast.go create mode 100644 vendor/github.com/spf13/cast/caste.go create mode 100644 vendor/github.com/spf13/jwalterweatherman/LICENSE create mode 100644 vendor/github.com/spf13/jwalterweatherman/default_notepad.go create mode 100644 vendor/github.com/spf13/jwalterweatherman/log_counter.go create mode 100644 vendor/github.com/spf13/jwalterweatherman/notepad.go create mode 100644 vendor/github.com/spf13/pflag/LICENSE create mode 100644 vendor/github.com/spf13/pflag/bool.go create mode 100644 vendor/github.com/spf13/pflag/bool_slice.go create mode 100644 vendor/github.com/spf13/pflag/count.go create mode 100644 vendor/github.com/spf13/pflag/duration.go create mode 100644 vendor/github.com/spf13/pflag/duration_slice.go create mode 100644 vendor/github.com/spf13/pflag/flag.go create mode 100644 vendor/github.com/spf13/pflag/float32.go create mode 100644 vendor/github.com/spf13/pflag/float64.go create mode 100644 vendor/github.com/spf13/pflag/golangflag.go create mode 100644 vendor/github.com/spf13/pflag/int.go create mode 100644 vendor/github.com/spf13/pflag/int16.go create mode 100644 vendor/github.com/spf13/pflag/int32.go create mode 100644 vendor/github.com/spf13/pflag/int64.go create mode 100644 vendor/github.com/spf13/pflag/int8.go create mode 100644 vendor/github.com/spf13/pflag/int_slice.go create mode 100644 vendor/github.com/spf13/pflag/ip.go create mode 100644 vendor/github.com/spf13/pflag/ip_slice.go create mode 100644 vendor/github.com/spf13/pflag/ipmask.go create mode 100644 vendor/github.com/spf13/pflag/ipnet.go create mode 100644 
vendor/github.com/spf13/pflag/string.go create mode 100644 vendor/github.com/spf13/pflag/string_array.go create mode 100644 vendor/github.com/spf13/pflag/string_slice.go create mode 100644 vendor/github.com/spf13/pflag/uint.go create mode 100644 vendor/github.com/spf13/pflag/uint16.go create mode 100644 vendor/github.com/spf13/pflag/uint32.go create mode 100644 vendor/github.com/spf13/pflag/uint64.go create mode 100644 vendor/github.com/spf13/pflag/uint8.go create mode 100644 vendor/github.com/spf13/pflag/uint_slice.go create mode 100644 vendor/github.com/spf13/viper/LICENSE create mode 100644 vendor/github.com/spf13/viper/flags.go create mode 100644 vendor/github.com/spf13/viper/remote/remote.go create mode 100644 vendor/github.com/spf13/viper/util.go create mode 100644 vendor/github.com/spf13/viper/viper.go create mode 100644 vendor/github.com/xordataexchange/crypt/backend/LICENSE create mode 100644 vendor/github.com/xordataexchange/crypt/backend/backend.go create mode 100644 vendor/github.com/xordataexchange/crypt/backend/consul/consul.go create mode 100644 vendor/github.com/xordataexchange/crypt/backend/etcd/etcd.go create mode 100644 vendor/github.com/xordataexchange/crypt/backend/mock/mock.go create mode 100644 vendor/github.com/xordataexchange/crypt/config/LICENSE create mode 100644 vendor/github.com/xordataexchange/crypt/config/config.go create mode 100644 vendor/github.com/xordataexchange/crypt/encoding/secconf/LICENSE create mode 100644 vendor/github.com/xordataexchange/crypt/encoding/secconf/secconf.go create mode 100644 vendor/golang.org/x/crypto/cast5/LICENSE create mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go create mode 100644 vendor/golang.org/x/crypto/openpgp/LICENSE create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go create mode 100644 vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go create mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go create mode 100644 
vendor/golang.org/x/crypto/openpgp/packet/userid.go create mode 100644 vendor/golang.org/x/crypto/openpgp/read.go create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go create mode 100644 vendor/golang.org/x/crypto/openpgp/write.go create mode 100644 vendor/golang.org/x/text/internal/gen/LICENSE create mode 100644 vendor/golang.org/x/text/internal/gen/bitfield/bitfield.go create mode 100644 vendor/golang.org/x/text/internal/gen/code.go create mode 100644 vendor/golang.org/x/text/internal/gen/gen.go create mode 100644 vendor/golang.org/x/text/internal/triegen/LICENSE create mode 100644 vendor/golang.org/x/text/internal/triegen/compact.go create mode 100644 vendor/golang.org/x/text/internal/triegen/print.go create mode 100644 vendor/golang.org/x/text/internal/triegen/triegen.go create mode 100644 vendor/golang.org/x/text/internal/ucd/LICENSE create mode 100644 vendor/golang.org/x/text/internal/ucd/ucd.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/LICENSE create mode 100644 vendor/golang.org/x/text/unicode/cldr/base.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/cldr.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/collate.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/decode.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/makexml.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/resolve.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/slice.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/xml.go create mode 100644 vendor/golang.org/x/text/unicode/norm/LICENSE create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go diff --git a/vendor/github.com/armon/consul-api/LICENSE b/vendor/github.com/armon/consul-api/LICENSE new file mode 100644 index 00000000..f0e5c79e --- /dev/null +++ b/vendor/github.com/armon/consul-api/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. 
that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
\ No newline at end of file
diff --git a/vendor/github.com/armon/consul-api/acl.go b/vendor/github.com/armon/consul-api/acl.go
new file mode 100644
index 00000000..e0179f54
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/acl.go
@@ -0,0 +1,140 @@
+package consulapi
+
+const (
+	// ACLClientType is the client type token
+	ACLClientType = "client"
+
+	// ACLManagementType is the management type token
+	ACLManagementType = "management"
+)
+
+// ACLEntry is used to represent an ACL entry
+type ACLEntry struct {
+	CreateIndex uint64
+	ModifyIndex uint64
+	ID          string
+	Name        string
+	Type        string
+	Rules       string
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+	c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+	return &ACL{c}
+}
+
+// Create is used to generate a new token with the given parameters
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/create")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update is used to update the rules of an existing token
+func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/update")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Destroy is used to destroy a given ACL token ID
+func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Clone is used to return a new token cloned from an existing one
+func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Info is used to query for information about an ACL token
+func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/info/"+id)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to get all the ACL tokens
+func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/list")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
diff --git a/vendor/github.com/armon/consul-api/agent.go b/vendor/github.com/armon/consul-api/agent.go
new file mode 100644
index 00000000..eec93cb9
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/agent.go
@@ -0,0 +1,272 @@
+package consulapi
+
+import (
+	"fmt"
+)
+
+// AgentCheck represents a check known to the agent
+type AgentCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+}
+
+// AgentService represents a service known to the agent
+type AgentService struct {
+	ID      string
+	Service string
+	Tags    []string
+	Port    int
+}
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+	Name        string
+	Addr        string
+	Port        uint16
+	Tags        map[string]string
+	Status      int
+	ProtocolMin uint8
+	ProtocolMax uint8
+	ProtocolCur uint8
+	DelegateMin uint8
+	DelegateMax uint8
+	DelegateCur uint8
+}
+
+// AgentServiceRegistration is used to register a new service
+type AgentServiceRegistration struct {
+	ID    string   `json:",omitempty"`
+	Name  string   `json:",omitempty"`
+	Tags  []string `json:",omitempty"`
+	Port  int      `json:",omitempty"`
+	Check *AgentServiceCheck
+}
+
+// AgentCheckRegistration is used to register a new check
+type AgentCheckRegistration struct {
+	ID    string `json:",omitempty"`
+	Name  string `json:",omitempty"`
+	Notes string `json:",omitempty"`
+	AgentServiceCheck
+}
+
+// AgentServiceCheck is used to create an associated
+// check for a service
+type AgentServiceCheck struct {
+	Script   string `json:",omitempty"`
+	Interval string `json:",omitempty"`
+	TTL      string `json:",omitempty"`
+}
+
+// Agent can be used to query the Agent endpoints
+type Agent struct {
+	c *Client
+
+	// cache the node name
+	nodeName string
+}
+
+// Agent returns a handle to the agent endpoints
+func (c *Client) Agent() *Agent {
+	return &Agent{c: c}
+}
+
+// Self is used to query the agent we are speaking to for
+// information about itself
+func (a *Agent) Self() (map[string]map[string]interface{}, error) {
+	r := a.c.newRequest("GET", "/v1/agent/self")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]map[string]interface{}
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Members returns the known gossip members. The WAN +// flag can be used to query a server for WAN members. +func (a *Agent) Members(wan bool) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state +func (a *Agent) PassTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state +func (a *Agent) WarnTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "warn") +} + +// FailTTL is used to set a TTL check to the failing state +func (a *Agent) FailTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "fail") +} + +// UpdateTTL is used to update the TTL of a check +func (a *Agent) UpdateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check + _, resp, err := requireOK(a.c.doRequest(r)) + if err 
!= nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/armon/consul-api/api.go b/vendor/github.com/armon/consul-api/api.go new file mode 100644 index 00000000..e1335769 --- /dev/null +++ b/vendor/github.com/armon/consul-api/api.go @@ -0,0 +1,323 @@ +package consulapi + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overriden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. 
This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Consul server + Address string + + // Scheme is the URI scheme for the Consul server + Scheme string + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string + + // HttpClient is the client to use. Default will be + // used if not provided. + HttpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. If not provided, + // the agent default values will be used. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string +} + +// DefaultConfig returns a default configuration for the client +func DefaultConfig() *Config { + return &Config{ + Address: "127.0.0.1:8500", + Scheme: "http", + HttpClient: http.DefaultClient, + } +} + +// Client provides a client to the Consul API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.Address) == 0 { + config.Address = defConfig.Address + } + + if len(config.Scheme) == 0 { + config.Scheme = defConfig.Scheme + } + + if config.HttpClient == nil { + config.HttpClient = defConfig.HttpClient + } + + client := &Client{ + config: *config, + } + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + obj interface{} +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Token != "" { + r.params.Set("token", q.Token) + } +} + +// durToMsec converts a duration to a millisecond specified string +func durToMsec(dur time.Duration) string { + return fmt.Sprintf("%dms", dur/time.Millisecond) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.Token != "" { + r.params.Set("token", q.Token) + } +} + +// toHTTP converts the request to an HTTP request +func (r *request) toHTTP() (*http.Request, error) 
{ + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Get the url sring + urlRaw := r.url.String() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + if b, err := encodeBody(r.obj); err != nil { + return nil, err + } else { + r.body = b + } + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, urlRaw, r.body) + + // Setup auth + if err == nil && r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + + return req, err +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) *request { + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: c.config.Scheme, + Host: c.config.Address, + Path: path, + }, + params: make(map[string][]string), + } + if c.config.Datacenter != "" { + r.params.Set("dc", c.config.Datacenter) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.Token != "" { + r.params.Set("token", r.config.Token) + } + return r +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.HttpClient.Do(req) + diff := time.Now().Sub(start) + return diff, resp, err +} + +// parseQueryMeta is used to help parse query meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index + index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + return d, resp, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + return d, resp, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, e +} diff --git a/vendor/github.com/armon/consul-api/catalog.go b/vendor/github.com/armon/consul-api/catalog.go new file mode 100644 index 00000000..8080e2a9 --- /dev/null +++ b/vendor/github.com/armon/consul-api/catalog.go @@ -0,0 +1,181 @@ +package consulapi + +type Node struct { + Node string + Address string +} + +type CatalogService struct { + Node string + Address string + ServiceID string + ServiceName string + ServiceTags []string + ServicePort int +} + +type CatalogNode struct { + Node *Node 
+ Services map[string]*AgentService +} + +type CatalogRegistration struct { + Node string + Address string + Datacenter string + Service *AgentService + Check *AgentCheck +} + +type CatalogDeregistration struct { + Node string + Address string + Datacenter string + ServiceID string + CheckID string +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c *Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + 
r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNode + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/armon/consul-api/event.go b/vendor/github.com/armon/consul-api/event.go new file mode 100644 index 00000000..59813d40 --- /dev/null +++ b/vendor/github.com/armon/consul-api/event.go @@ -0,0 +1,104 @@ +package consulapi + +import ( + "bytes" + "strconv" +) + +// Event can be used to query the Event endpoints +type Event struct { + c *Client +} + +// UserEvent represents an event that was fired by the user +type UserEvent struct { + ID string + Name string + Payload []byte + NodeFilter string + ServiceFilter string + TagFilter string + Version int + LTime uint64 +} + +// Event returns a handle to the event endpoints +func (c *Client) Event() *Event { + return &Event{c} +} + +// Fire is used to fire a new user event. Only the Name, Payload and Filters +// are respected. This returns the ID or an associated error. Cross DC requests +// are supported. +func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) { + r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name) + r.setWriteOptions(q) + if params.NodeFilter != "" { + r.params.Set("node", params.NodeFilter) + } + if params.ServiceFilter != "" { + r.params.Set("service", params.ServiceFilter) + } + if params.TagFilter != "" { + r.params.Set("tag", params.TagFilter) + } + if params.Payload != nil { + r.body = bytes.NewReader(params.Payload) + } + + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out UserEvent + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// List is used to get the most recent events an agent has received. +// This list can be optionally filtered by the name. This endpoint supports +// quasi-blocking queries. The index is not monotonic, nor does it provide provide +// LastContact or KnownLeader. +func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { + r := e.c.newRequest("GET", "/v1/event/list") + r.setQueryOptions(q) + if name != "" { + r.params.Set("name", name) + } + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*UserEvent + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// IDToIndex is a bit of a hack. This simulates the index generation to +// convert an event ID into a WaitIndex. 
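A minimal usage sketch of the Fire and List endpoints above, assuming a Consul agent reachable at the DefaultConfig address; the import path follows the vendor directory layout and the event name is only an illustration:

package main

import (
	"fmt"
	"log"

	consulapi "github.com/armon/consul-api"
)

func main() {
	// DefaultConfig targets 127.0.0.1:8500 over HTTP.
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	event := client.Event()

	// Fire a user event; only the Name, Payload and filters are sent.
	id, _, err := event.Fire(&consulapi.UserEvent{Name: "deploy", Payload: []byte("v1")}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fired event:", id)

	// List the most recent events with that name.
	events, _, err := event.List("deploy", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("events received:", len(events))
}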
+func (e *Event) IDToIndex(uuid string) uint64 { + lower := uuid[0:8] + uuid[9:13] + uuid[14:18] + upper := uuid[19:23] + uuid[24:36] + lowVal, err := strconv.ParseUint(lower, 16, 64) + if err != nil { + panic("Failed to convert " + lower) + } + highVal, err := strconv.ParseUint(upper, 16, 64) + if err != nil { + panic("Failed to convert " + upper) + } + return lowVal ^ highVal +} diff --git a/vendor/github.com/armon/consul-api/health.go b/vendor/github.com/armon/consul-api/health.go new file mode 100644 index 00000000..12225790 --- /dev/null +++ b/vendor/github.com/armon/consul-api/health.go @@ -0,0 +1,136 @@ +package consulapi + +import ( + "fmt" +) + +// HealthCheck is used to represent a single check +type HealthCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string +} + +// ServiceEntry is used for the health service endpoint +type ServiceEntry struct { + Node *Node + Service *AgentService + Checks []*HealthCheck +} + +// Health can be used to query the Health endpoints +type Health struct { + c *Client +} + +// Health returns a handle to the health endpoints +func (c *Client) Health() *Health { + return &Health{c} +} + +// Node is used to query for checks belonging to a given node +func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Checks is used to return the checks associated with a service +func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/checks/"+service) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query health information along with service info +// for a given service. It can optionally do server-side filtering on a tag +// or nodes with passing health checks only. +func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + if passingOnly { + r.params.Set("passing", "1") + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*ServiceEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// State is used to retrieve all the checks in a given state. +// The wildcard "any" state can also be used for all checks. 
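A short sketch of the Service endpoint above, restricted to nodes with passing checks; the service name "web" is illustrative and a local agent at the DefaultConfig address is assumed:

package main

import (
	"fmt"
	"log"

	consulapi "github.com/armon/consul-api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	health := client.Health()

	// Nodes providing the "web" service, filtered server-side to passing checks.
	entries, meta, err := health.Service("web", "", true, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("last index:", meta.LastIndex)
	for _, entry := range entries {
		fmt.Println(entry.Node.Node, entry.Service.Port, len(entry.Checks), "checks")
	}
}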
+func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + switch state { + case "any": + case "warning": + case "critical": + case "passing": + case "unknown": + default: + return nil, nil, fmt.Errorf("Unsupported state: %v", state) + } + r := h.c.newRequest("GET", "/v1/health/state/"+state) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/armon/consul-api/kv.go b/vendor/github.com/armon/consul-api/kv.go new file mode 100644 index 00000000..98c3b1a0 --- /dev/null +++ b/vendor/github.com/armon/consul-api/kv.go @@ -0,0 +1,219 @@ +package consulapi + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +// KVPair is used to represent a single K/V entry +type KVPair struct { + Key string + CreateIndex uint64 + ModifyIndex uint64 + LockIndex uint64 + Flags uint64 + Value []byte + Session string +} + +// KVPairs is a list of KVPair objects +type KVPairs []*KVPair + +// KV is used to manipulate the K/V API +type KV struct { + c *Client +} + +// KV is used to return a handle to the K/V apis +func (c *Client) KV() *KV { + return &KV{c} +} + +// Get is used to lookup a single key +func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { + resp, qm, err := k.getInternal(key, nil, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to lookup all keys under a prefix +func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { + resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Keys is used to list all the keys under a prefix. Optionally, +// a separator can be used to limit the responses. 
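A small sketch of the Get and List endpoints above (Put, defined further down in this file, seeds a key first); the key names are illustrative and a local agent at the DefaultConfig address is assumed:

package main

import (
	"fmt"
	"log"

	consulapi "github.com/armon/consul-api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// Write a value under a key.
	if _, err := kv.Put(&consulapi.KVPair{Key: "app/config/port", Value: []byte("8080")}, nil); err != nil {
		log.Fatal(err)
	}

	// Get returns a nil pair when the key does not exist.
	pair, _, err := kv.Get("app/config/port", nil)
	if err != nil {
		log.Fatal(err)
	}
	if pair != nil {
		fmt.Printf("%s = %s\n", pair.Key, pair.Value)
	}

	// List everything under the prefix.
	pairs, _, err := kv.List("app/config", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("keys under prefix:", len(pairs))
}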
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { + params := map[string]string{"keys": ""} + if separator != "" { + params["separator"] = separator + } + resp, qm, err := k.getInternal(prefix, params, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []string + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { + r := k.c.newRequest("GET", "/v1/kv/"+key) + r.setQueryOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + resp.Body.Close() + return nil, qm, nil + } else if resp.StatusCode != 200 { + resp.Body.Close() + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + return resp, qm, nil +} + +// Put is used to write a new value. Only the +// Key, Flags and Value is respected. +func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { + params := make(map[string]string, 1) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + _, wm, err := k.put(p.Key, params, p.Value, q) + return wm, err +} + +// CAS is used for a Check-And-Set operation. The Key, +// ModifyIndex, Flags and Value are respected. Returns true +// on success or false on failures. +func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) + return k.put(p.Key, params, p.Value, q) +} + +// Acquire is used for a lock acquisiiton operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. +func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["acquire"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +// Release is used for a lock release operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. 
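A sketch of the read-modify-write pattern the CAS endpoint above enables, assuming a local agent at the DefaultConfig address; the key name is illustrative:

package main

import (
	"fmt"
	"log"

	consulapi "github.com/armon/consul-api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// Read the current value; the ModifyIndex is what CAS checks against.
	pair, _, err := kv.Get("app/config/port", nil)
	if err != nil {
		log.Fatal(err)
	}
	if pair == nil {
		// Key absent: a ModifyIndex of 0 asks Consul to write only if the key does not yet exist.
		pair = &consulapi.KVPair{Key: "app/config/port"}
	}
	pair.Value = []byte("9090")

	// CAS succeeds only if nobody changed the key since the read above.
	ok, _, err := kv.CAS(pair, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("check-and-set applied:", ok)
}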
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + return k.deleteInternal(key, nil, w) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + return k.deleteInternal(prefix, []string{"recurse"}, w) +} + +func (k *KV) deleteInternal(key string, params []string, q *WriteOptions) (*WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+key) + r.setWriteOptions(q) + for _, param := range params { + r.params.Set(param, "") + } + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + return qm, nil +} diff --git a/vendor/github.com/armon/consul-api/session.go b/vendor/github.com/armon/consul-api/session.go new file mode 100644 index 00000000..4fbfc5ee --- /dev/null +++ b/vendor/github.com/armon/consul-api/session.go @@ -0,0 +1,204 @@ +package consulapi + +import ( + "time" +) + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + Checks []string + LockDelay time.Duration + Behavior string + TTL string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["Checks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. Providing a session entry can +// customize the session. It can also be nil to use defaults. 
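A sketch combining the session endpoints below with the KV Acquire and Release endpoints above to take a short-lived lock, assuming a local agent at the DefaultConfig address; the key and session names are illustrative:

package main

import (
	"fmt"
	"log"

	consulapi "github.com/armon/consul-api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Create a session with a TTL; Create also accepts nil to use the defaults.
	session := client.Session()
	id, _, err := session.Create(&consulapi.SessionEntry{Name: "worker-lock", TTL: "15s"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer session.Destroy(id, nil)

	// Try to acquire a lock key with that session, then release it.
	kv := client.KV()
	pair := &consulapi.KVPair{Key: "locks/worker", Session: id}
	acquired, _, err := kv.Acquire(pair, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("lock acquired:", acquired)
	if acquired {
		if _, _, err := kv.Release(pair, nil); err != nil {
			log.Fatal(err)
		}
	}
}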
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/create") + r.setWriteOptions(q) + r.obj = obj + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalides a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, wm, err + } + + if len(entries) > 0 { + return entries[0], wm, nil + } + return nil, wm, nil +} + +// Info looks up a single session +func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/session/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List gets sessions for a node +func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/session/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// List gets all active sessions +func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/session/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + 
return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/armon/consul-api/status.go b/vendor/github.com/armon/consul-api/status.go new file mode 100644 index 00000000..21c31982 --- /dev/null +++ b/vendor/github.com/armon/consul-api/status.go @@ -0,0 +1,43 @@ +package consulapi + +// Status can be used to query the Status endpoints +type Status struct { + c *Client +} + +// Status returns a handle to the status endpoints +func (c *Client) Status() *Status { + return &Status{c} +} + +// Leader is used to query for a known leader +func (s *Status) Leader() (string, error) { + r := s.c.newRequest("GET", "/v1/status/leader") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + var leader string + if err := decodeBody(resp, &leader); err != nil { + return "", err + } + return leader, nil +} + +// Peers is used to query for a known raft peers +func (s *Status) Peers() ([]string, error) { + r := s.c.newRequest("GET", "/v1/status/peers") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var peers []string + if err := decodeBody(resp, &peers); err != nil { + return nil, err + } + return peers, nil +} diff --git a/vendor/github.com/coreos/etcd/client/LICENSE b/vendor/github.com/coreos/etcd/client/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go new file mode 100644 index 00000000..b6ba7e15 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/auth_role.go @@ -0,0 +1,236 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/url" +) + +type Role struct { + Role string `json:"role"` + Permissions Permissions `json:"permissions"` + Grant *Permissions `json:"grant,omitempty"` + Revoke *Permissions `json:"revoke,omitempty"` +} + +type Permissions struct { + KV rwPermission `json:"kv"` +} + +type rwPermission struct { + Read []string `json:"read"` + Write []string `json:"write"` +} + +type PermissionType int + +const ( + ReadPermission PermissionType = iota + WritePermission + ReadWritePermission +) + +// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to +// interact with etcd's role creation and modification features. +func NewAuthRoleAPI(c Client) AuthRoleAPI { + return &httpAuthRoleAPI{ + client: c, + } +} + +type AuthRoleAPI interface { + // AddRole adds a role. + AddRole(ctx context.Context, role string) error + + // RemoveRole removes a role. + RemoveRole(ctx context.Context, role string) error + + // GetRole retrieves role details. + GetRole(ctx context.Context, role string) (*Role, error) + + // GrantRoleKV grants a role some permission prefixes for the KV store. + GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // RevokeRoleKV revokes some permission prefixes for a role on the KV store. + RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // ListRoles lists roles. + ListRoles(ctx context.Context) ([]string, error) +} + +type httpAuthRoleAPI struct { + client httpClient +} + +type authRoleAPIAction struct { + verb string + name string + role *Role +} + +type authRoleAPIList struct{} + +func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", l.name) + if l.role == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.role) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { + resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + var roleList struct { + Roles []Role `json:"roles"` + } + if err = json.Unmarshal(body, &roleList); err != nil { + return nil, err + } + ret := make([]string, 0, len(roleList.Roles)) + for _, r := range roleList.Roles { + ret = append(ret, r.Role) + } + return ret, nil +} + +func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { + role := &Role{ + Role: rolename, + } + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "DELETE", + name: rolename, + }) +} + +func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { + resp, body, err := r.client.Do(ctx, req) + if err != nil { + return err + } + if err := assertStatusCode(resp.StatusCode, http.StatusOK, 
http.StatusCreated); err != nil { + var sec authError + err := json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { + return r.modRole(ctx, &authRoleAPIAction{ + verb: "GET", + name: rolename, + }) +} + +func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { + var out rwPermission + switch permType { + case ReadPermission: + out.Read = prefixes + case WritePermission: + out.Write = prefixes + case ReadWritePermission: + out.Read = prefixes + out.Write = prefixes + } + return out +} + +func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Grant: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Revoke: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { + resp, body, err := r.client.Do(ctx, req) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var role Role + if err = json.Unmarshal(body, &role); err != nil { + return nil, err + } + return &role, nil +} diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go new file mode 100644 index 00000000..8e7e2efe --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/auth_user.go @@ -0,0 +1,319 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/url" + "path" +) + +var ( + defaultV2AuthPrefix = "/v2/auth" +) + +type User struct { + User string `json:"user"` + Password string `json:"password,omitempty"` + Roles []string `json:"roles"` + Grant []string `json:"grant,omitempty"` + Revoke []string `json:"revoke,omitempty"` +} + +// userListEntry is the user representation given by the server for ListUsers +type userListEntry struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +type UserRoles struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +func v2AuthURL(ep url.URL, action string, name string) *url.URL { + if name != "" { + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) + return &ep + } + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) + return &ep +} + +// NewAuthAPI constructs a new AuthAPI that uses HTTP to +// interact with etcd's general auth features. +func NewAuthAPI(c Client) AuthAPI { + return &httpAuthAPI{ + client: c, + } +} + +type AuthAPI interface { + // Enable auth. + Enable(ctx context.Context) error + + // Disable auth. + Disable(ctx context.Context) error +} + +type httpAuthAPI struct { + client httpClient +} + +func (s *httpAuthAPI) Enable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"PUT"}) +} + +func (s *httpAuthAPI) Disable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"DELETE"}) +} + +func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { + resp, body, err := s.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +type authAPIAction struct { + verb string +} + +func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "enable", "") + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req +} + +type authError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e authError) Error() string { + return e.Message +} + +// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to +// interact with etcd's user creation and modification features. +func NewAuthUserAPI(c Client) AuthUserAPI { + return &httpAuthUserAPI{ + client: c, + } +} + +type AuthUserAPI interface { + // AddUser adds a user. + AddUser(ctx context.Context, username string, password string) error + + // RemoveUser removes a user. + RemoveUser(ctx context.Context, username string) error + + // GetUser retrieves user details. + GetUser(ctx context.Context, username string) (*User, error) + + // GrantUser grants a user some permission roles. + GrantUser(ctx context.Context, username string, roles []string) (*User, error) + + // RevokeUser revokes some permission roles from a user. + RevokeUser(ctx context.Context, username string, roles []string) (*User, error) + + // ChangePassword changes the user's password. + ChangePassword(ctx context.Context, username string, password string) (*User, error) + + // ListUsers lists the users. 
+ ListUsers(ctx context.Context) ([]string, error) +} + +type httpAuthUserAPI struct { + client httpClient +} + +type authUserAPIAction struct { + verb string + username string + user *User +} + +type authUserAPIList struct{} + +func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", l.username) + if l.user == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.user) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { + resp, body, err := u.client.Do(ctx, &authUserAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + + var userList struct { + Users []userListEntry `json:"users"` + } + + if err = json.Unmarshal(body, &userList); err != nil { + return nil, err + } + + ret := make([]string, 0, len(userList.Users)) + for _, u := range userList.Users { + ret = append(ret, u.User) + } + return ret, nil +} + +func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { + user := &User{ + User: username, + Password: password, + } + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "DELETE", + username: username, + }) +} + +func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { + return u.modUser(ctx, &authUserAPIAction{ + verb: "GET", + username: username, + }) +} + +func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Grant: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Revoke: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { + user := &User{ + User: username, + Password: password, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return nil, 
err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var user User + if err = json.Unmarshal(body, &user); err != nil { + var userR UserRoles + if urerr := json.Unmarshal(body, &userR); urerr != nil { + return nil, err + } + user.User = userR.User + for _, r := range userR.Roles { + user.Roles = append(user.Roles, r.Role) + } + } + return &user, nil +} diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go new file mode 100644 index 00000000..76d1f040 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cancelreq.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// borrowed from golang/net/context/ctxhttp/cancelreq.go + +package client + +import "net/http" + +func requestCanceler(tr CancelableTransport, req *http.Request) func() { + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go new file mode 100644 index 00000000..e6874505 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -0,0 +1,710 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "sync" + "time" + + "github.com/coreos/etcd/version" +) + +var ( + ErrNoEndpoints = errors.New("client: no endpoints available") + ErrTooManyRedirects = errors.New("client: too many redirects") + ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") + ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") + errTooManyRedirectChecks = errors.New("client: too many redirect checks") + + // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so + // that Do() will not retry a request + oneShotCtxValue interface{} +) + +var DefaultRequestTimeout = 5 * time.Second + +var DefaultTransport CancelableTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +type EndpointSelectionMode int + +const ( + // EndpointSelectionRandom is the default value of the 'SelectionMode'. + // As the name implies, the client object will pick a node from the members + // of the cluster in a random fashion. If the cluster has three members, A, B, + // and C, the client picks any node from its three members as its request + // destination. 
+ EndpointSelectionRandom EndpointSelectionMode = iota + + // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', + // requests are sent directly to the cluster leader. This reduces + // forwarding roundtrips compared to making requests to etcd followers + // who then forward them to the cluster leader. In the event of a leader + // failure, however, clients configured this way cannot prioritize among + // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' + // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to + // maintain its knowledge of current cluster state. + // + // This mode should be used with Client.AutoSync(). + EndpointSelectionPrioritizeLeader +) + +type Config struct { + // Endpoints defines a set of URLs (schemes, hosts and ports only) + // that can be used to communicate with a logical etcd cluster. For + // example, a three-node cluster could be provided like so: + // + // Endpoints: []string{ + // "http://node1.example.com:2379", + // "http://node2.example.com:2379", + // "http://node3.example.com:2379", + // } + // + // If multiple endpoints are provided, the Client will attempt to + // use them all in the event that one or more of them are unusable. + // + // If Client.Sync is ever called, the Client may cache an alternate + // set of endpoints to continue operation. + Endpoints []string + + // Transport is used by the Client to drive HTTP requests. If not + // provided, DefaultTransport will be used. + Transport CancelableTransport + + // CheckRedirect specifies the policy for handling HTTP redirects. + // If CheckRedirect is not nil, the Client calls it before + // following an HTTP redirect. The sole argument is the number of + // requests that have already been made. If CheckRedirect returns + // an error, Client.Do will not make any further requests and return + // the error back it to the caller. + // + // If CheckRedirect is nil, the Client uses its default policy, + // which is to stop after 10 consecutive requests. + CheckRedirect CheckRedirectFunc + + // Username specifies the user credential to add as an authorization header + Username string + + // Password is the password for the specified user to add as an authorization header + // to the request. + Password string + + // HeaderTimeoutPerRequest specifies the time limit to wait for response + // header in a single request made by the Client. The timeout includes + // connection time, any redirects, and header wait time. + // + // For non-watch GET request, server returns the response body immediately. + // For PUT/POST/DELETE request, server will attempt to commit request + // before responding, which is expected to take `100ms + 2 * RTT`. + // For watch request, server returns the header immediately to notify Client + // watch start. But if server is behind some kind of proxy, the response + // header may be cached at proxy, and Client cannot rely on this behavior. + // + // Especially, wait request will ignore this timeout. + // + // One API call may send multiple requests to different etcd servers until it + // succeeds. Use context of the API to specify the overall timeout. + // + // A HeaderTimeoutPerRequest of zero means no timeout. + HeaderTimeoutPerRequest time.Duration + + // SelectionMode is an EndpointSelectionMode enum that specifies the + // policy for choosing the etcd cluster node to which requests are sent. 
+ SelectionMode EndpointSelectionMode +} + +func (cfg *Config) transport() CancelableTransport { + if cfg.Transport == nil { + return DefaultTransport + } + return cfg.Transport +} + +func (cfg *Config) checkRedirect() CheckRedirectFunc { + if cfg.CheckRedirect == nil { + return DefaultCheckRedirect + } + return cfg.CheckRedirect +} + +// CancelableTransport mimics net/http.Transport, but requires that +// the object also support request cancellation. +type CancelableTransport interface { + http.RoundTripper + CancelRequest(req *http.Request) +} + +type CheckRedirectFunc func(via int) error + +// DefaultCheckRedirect follows up to 10 redirects, but no more. +var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { + if via > 10 { + return ErrTooManyRedirects + } + return nil +} + +type Client interface { + // Sync updates the internal cache of the etcd cluster's membership. + Sync(context.Context) error + + // AutoSync periodically calls Sync() every given interval. + // The recommended sync interval is 10 seconds to 1 minute, which does + // not bring too much overhead to server and makes client catch up the + // cluster change in time. + // + // The example to use it: + // + // for { + // err := client.AutoSync(ctx, 10*time.Second) + // if err == context.DeadlineExceeded || err == context.Canceled { + // break + // } + // log.Print(err) + // } + AutoSync(context.Context, time.Duration) error + + // Endpoints returns a copy of the current set of API endpoints used + // by Client to resolve HTTP requests. If Sync has ever been called, + // this may differ from the initial Endpoints provided in the Config. + Endpoints() []string + + // SetEndpoints sets the set of API endpoints used by Client to resolve + // HTTP requests. If the given endpoints are not valid, an error will be + // returned + SetEndpoints(eps []string) error + + // GetVersion retrieves the current etcd server and cluster version + GetVersion(ctx context.Context) (*version.Versions, error) + + httpClient +} + +func New(cfg Config) (Client, error) { + c := &httpClusterClient{ + clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), + rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + selectionMode: cfg.SelectionMode, + } + if cfg.Username != "" { + c.credentials = &credentials{ + username: cfg.Username, + password: cfg.Password, + } + } + if err := c.SetEndpoints(cfg.Endpoints); err != nil { + return nil, err + } + return c, nil +} + +type httpClient interface { + Do(context.Context, httpAction) (*http.Response, []byte, error) +} + +func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { + return func(ep url.URL) httpClient { + return &redirectFollowingHTTPClient{ + checkRedirect: cr, + client: &simpleHTTPClient{ + transport: tr, + endpoint: ep, + headerTimeout: headerTimeout, + }, + } + } +} + +type credentials struct { + username string + password string +} + +type httpClientFactory func(url.URL) httpClient + +type httpAction interface { + HTTPRequest(url.URL) *http.Request +} + +type httpClusterClient struct { + clientFactory httpClientFactory + endpoints []url.URL + pinned int + credentials *credentials + sync.RWMutex + rand *rand.Rand + selectionMode EndpointSelectionMode +} + +func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { + ceps := make([]url.URL, len(eps)) + copy(ceps, eps) + + // To perform a lookup on the new endpoint list 
without using the current + // client, we'll copy it + clientCopy := &httpClusterClient{ + clientFactory: c.clientFactory, + credentials: c.credentials, + rand: c.rand, + + pinned: 0, + endpoints: ceps, + } + + mAPI := NewMembersAPI(clientCopy) + leader, err := mAPI.Leader(ctx) + if err != nil { + return "", err + } + if len(leader.ClientURLs) == 0 { + return "", ErrNoLeaderEndpoint + } + + return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? +} + +func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { + if len(eps) == 0 { + return []url.URL{}, ErrNoEndpoints + } + + neps := make([]url.URL, len(eps)) + for i, ep := range eps { + u, err := url.Parse(ep) + if err != nil { + return []url.URL{}, err + } + neps[i] = *u + } + return neps, nil +} + +func (c *httpClusterClient) SetEndpoints(eps []string) error { + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + + c.endpoints = shuffleEndpoints(c.rand, neps) + // We're not doing anything for PrioritizeLeader here. This is + // due to not having a context meaning we can't call getLeaderEndpoint + // However, if you're using PrioritizeLeader, you've already been told + // to regularly call sync, where we do have a ctx, and can figure the + // leader. PrioritizeLeader is also quite a loose guarantee, so deal + // with it + c.pinned = 0 + + return nil +} + +func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + action := act + c.RLock() + leps := len(c.endpoints) + eps := make([]url.URL, leps) + n := copy(eps, c.endpoints) + pinned := c.pinned + + if c.credentials != nil { + action = &authedAction{ + act: act, + credentials: *c.credentials, + } + } + c.RUnlock() + + if leps == 0 { + return nil, nil, ErrNoEndpoints + } + + if leps != n { + return nil, nil, errors.New("unable to pick endpoint: copy failed") + } + + var resp *http.Response + var body []byte + var err error + cerr := &ClusterError{} + isOneShot := ctx.Value(&oneShotCtxValue) != nil + + for i := pinned; i < leps+pinned; i++ { + k := i % leps + hc := c.clientFactory(eps[k]) + resp, body, err = hc.Do(ctx, action) + if err != nil { + cerr.Errors = append(cerr.Errors, err) + if err == ctx.Err() { + return nil, nil, ctx.Err() + } + if err == context.Canceled || err == context.DeadlineExceeded { + return nil, nil, err + } + } else if resp.StatusCode/100 == 5 { + switch resp.StatusCode { + case http.StatusInternalServerError, http.StatusServiceUnavailable: + // TODO: make sure this is a no leader response + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) + default: + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) + } + err = cerr.Errors[0] + } + if err != nil { + if !isOneShot { + continue + } + c.Lock() + c.pinned = (k + 1) % leps + c.Unlock() + return nil, nil, err + } + if k != pinned { + c.Lock() + c.pinned = k + c.Unlock() + } + return resp, body, nil + } + + return nil, nil, cerr +} + +func (c *httpClusterClient) Endpoints() []string { + c.RLock() + defer c.RUnlock() + + eps := make([]string, len(c.endpoints)) + for i, ep := range c.endpoints { + eps[i] = ep.String() + } + + return eps +} + +func (c *httpClusterClient) Sync(ctx context.Context) error { + mAPI := NewMembersAPI(c) + ms, err := mAPI.List(ctx) + if err != nil { + return err + } + + var eps []string + for _, m := range ms { + 
eps = append(eps, m.ClientURLs...) + } + + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + npin := 0 + + switch c.selectionMode { + case EndpointSelectionRandom: + c.RLock() + eq := endpointsEqual(c.endpoints, neps) + c.RUnlock() + + if eq { + return nil + } + // When items in the endpoint list changes, we choose a new pin + neps = shuffleEndpoints(c.rand, neps) + case EndpointSelectionPrioritizeLeader: + nle, err := c.getLeaderEndpoint(ctx, neps) + if err != nil { + return ErrNoLeaderEndpoint + } + + for i, n := range neps { + if n.String() == nle { + npin = i + break + } + } + default: + return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) + } + + c.Lock() + defer c.Unlock() + c.endpoints = neps + c.pinned = npin + + return nil +} + +func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + err := c.Sync(ctx) + if err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + +type roundTripResponse struct { + resp *http.Response + err error +} + +type simpleHTTPClient struct { + transport CancelableTransport + endpoint url.URL + headerTimeout time.Duration +} + +func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + req := act.HTTPRequest(c.endpoint) + + if err := printcURL(req); err != nil { + return nil, nil, err + } + + isWait := false + if req != nil && req.URL != nil { + ws := req.URL.Query().Get("wait") + if len(ws) != 0 { + var err error + isWait, err = strconv.ParseBool(ws) + if err != nil { + return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) + } + } + } + + var hctx context.Context + var hcancel context.CancelFunc + if !isWait && c.headerTimeout > 0 { + hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) + } else { + hctx, hcancel = context.WithCancel(ctx) + } + defer hcancel() + + reqcancel := requestCanceler(c.transport, req) + + rtchan := make(chan roundTripResponse, 1) + go func() { + resp, err := c.transport.RoundTrip(req) + rtchan <- roundTripResponse{resp: resp, err: err} + close(rtchan) + }() + + var resp *http.Response + var err error + + select { + case rtresp := <-rtchan: + resp, err = rtresp.resp, rtresp.err + case <-hctx.Done(): + // cancel and wait for request to actually exit before continuing + reqcancel() + rtresp := <-rtchan + resp = rtresp.resp + switch { + case ctx.Err() != nil: + err = ctx.Err() + case hctx.Err() != nil: + err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) + default: + panic("failed to get error from context") + } + } + + // always check for resp nil-ness to deal with possible + // race conditions between channels above + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var 
body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + done <- struct{}{} + }() + + select { + case <-ctx.Done(): + resp.Body.Close() + <-done + return nil, nil, ctx.Err() + case <-done: + } + + return resp, body, err +} + +type authedAction struct { + act httpAction + credentials credentials +} + +func (a *authedAction) HTTPRequest(url url.URL) *http.Request { + r := a.act.HTTPRequest(url) + r.SetBasicAuth(a.credentials.username, a.credentials.password) + return r +} + +type redirectFollowingHTTPClient struct { + client httpClient + checkRedirect CheckRedirectFunc +} + +func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + next := act + for i := 0; i < 100; i++ { + if i > 0 { + if err := r.checkRedirect(i); err != nil { + return nil, nil, err + } + } + resp, body, err := r.client.Do(ctx, next) + if err != nil { + return nil, nil, err + } + if resp.StatusCode/100 == 3 { + hdr := resp.Header.Get("Location") + if hdr == "" { + return nil, nil, fmt.Errorf("Location header not set") + } + loc, err := url.Parse(hdr) + if err != nil { + return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) + } + next = &redirectedHTTPAction{ + action: act, + location: *loc, + } + continue + } + return resp, body, nil + } + + return nil, nil, errTooManyRedirectChecks +} + +type redirectedHTTPAction struct { + action httpAction + location url.URL +} + +func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { + orig := r.action.HTTPRequest(ep) + orig.URL = &r.location + return orig +} + +func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { + // copied from Go 1.9<= rand.Rand.Perm + n := len(eps) + p := make([]int, n) + for i := 0; i < n; i++ { + j := r.Intn(i + 1) + p[i] = p[j] + p[j] = i + } + neps := make([]url.URL, n) + for i, k := range p { + neps[i] = eps[k] + } + return neps +} + +func endpointsEqual(left, right []url.URL) bool { + if len(left) != len(right) { + return false + } + + sLeft := make([]string, len(left)) + sRight := make([]string, len(right)) + for i, l := range left { + sLeft[i] = l.String() + } + for i, r := range right { + sRight[i] = r.String() + } + + sort.Strings(sLeft) + sort.Strings(sRight) + for i := range sLeft { + if sLeft[i] != sRight[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go new file mode 100644 index 00000000..34618cdb --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cluster_error.go @@ -0,0 +1,37 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
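
A short usage sketch of the Config, SelectionMode, and Sync machinery defined in client.go above. This is illustrative only and not part of the vendored etcd sources; it assumes an etcd member listening on 127.0.0.1:2379.

	package main

	import (
		"context"
		"log"
		"time"

		"github.com/coreos/etcd/client"
	)

	func main() {
		cfg := client.Config{
			Endpoints:               []string{"http://127.0.0.1:2379"},
			Transport:               client.DefaultTransport,
			HeaderTimeoutPerRequest: time.Second,
			// PrioritizeLeader relies on periodic Sync/AutoSync calls to
			// learn which member is currently the leader.
			SelectionMode: client.EndpointSelectionPrioritizeLeader,
		}
		c, err := client.New(cfg)
		if err != nil {
			log.Fatal(err)
		}

		ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
		defer cancel()
		// Refresh the endpoint list from the cluster membership; with
		// PrioritizeLeader this also pins the leader endpoint.
		if err := c.Sync(ctx); err != nil {
			log.Print(err)
		}
		log.Println("endpoints:", c.Endpoints())
	}
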
+ +package client + +import "fmt" + +type ClusterError struct { + Errors []error +} + +func (ce *ClusterError) Error() string { + s := ErrClusterUnavailable.Error() + for i, e := range ce.Errors { + s += fmt.Sprintf("; error #%d: %s\n", i, e) + } + return s +} + +func (ce *ClusterError) Detail() string { + s := "" + for i, e := range ce.Errors { + s += fmt.Sprintf("error #%d: %s\n", i, e) + } + return s +} diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go new file mode 100644 index 00000000..c8bc9fba --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/curl.go @@ -0,0 +1,70 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" +) + +var ( + cURLDebug = false +) + +func EnablecURLDebug() { + cURLDebug = true +} + +func DisablecURLDebug() { + cURLDebug = false +} + +// printcURL prints the cURL equivalent request to stderr. +// It returns an error if the body of the request cannot +// be read. +// The caller MUST cancel the request if there is an error. +func printcURL(req *http.Request) error { + if !cURLDebug { + return nil + } + var ( + command string + b []byte + err error + ) + + if req.URL != nil { + command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) + } + + if req.Body != nil { + b, err = ioutil.ReadAll(req.Body) + if err != nil { + return err + } + command += fmt.Sprintf(" -d %q", string(b)) + } + + fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) + + // reset body + body := bytes.NewBuffer(b) + req.Body = ioutil.NopCloser(body) + + return nil +} diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go new file mode 100644 index 00000000..442e35fe --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -0,0 +1,40 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "github.com/coreos/etcd/pkg/srv" +) + +// Discoverer is an interface that wraps the Discover method. +type Discoverer interface { + // Discover looks up the etcd servers for the domain. + Discover(domain string) ([]string, error) +} + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
+func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go new file mode 100644 index 00000000..ad4eca4e --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/doc.go @@ -0,0 +1,73 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package client provides bindings for the etcd APIs. + +Create a Config and exchange it for a Client: + + import ( + "net/http" + "context" + + "github.com/coreos/etcd/client" + ) + + cfg := client.Config{ + Endpoints: []string{"http://127.0.0.1:2379"}, + Transport: DefaultTransport, + } + + c, err := client.New(cfg) + if err != nil { + // handle error + } + +Clients are safe for concurrent use by multiple goroutines. + +Create a KeysAPI using the Client, then use it to interact with etcd: + + kAPI := client.NewKeysAPI(c) + + // create a new key /foo with the value "bar" + _, err = kAPI.Create(context.Background(), "/foo", "bar") + if err != nil { + // handle error + } + + // delete the newly created key only if the value is still "bar" + _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) + if err != nil { + // handle error + } + +Use a custom context to set timeouts on your operations: + + import "time" + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // set a new key, ignoring its previous state + _, err := kAPI.Set(ctx, "/ping", "pong", nil) + if err != nil { + if err == context.DeadlineExceeded { + // request took longer than 5s + } else { + // handle error + } + } + +*/ +package client diff --git a/vendor/github.com/coreos/etcd/client/integration/doc.go b/vendor/github.com/coreos/etcd/client/integration/doc.go new file mode 100644 index 00000000..e9c58d67 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/integration/doc.go @@ -0,0 +1,17 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package integration implements tests built upon embedded etcd, focusing on +// the correctness of the etcd v2 client. 
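
A short usage sketch of the SRV-based Discoverer defined in discover.go above, used to seed a Config with endpoints looked up from DNS. This is illustrative only and not part of the vendored etcd sources; "example.com" is a placeholder domain.

	package main

	import (
		"log"

		"github.com/coreos/etcd/client"
	)

	func main() {
		// Look up the etcd client SRV records for the domain and use the
		// returned URLs as the client's endpoint list.
		d := client.NewSRVDiscover()
		eps, err := d.Discover("example.com")
		if err != nil {
			log.Fatal(err)
		}
		c, err := client.New(client.Config{
			Endpoints: eps,
			Transport: client.DefaultTransport,
		})
		if err != nil {
			log.Fatal(err)
		}
		log.Println("using endpoints:", c.Endpoints())
	}
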
+package integration diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go new file mode 100644 index 00000000..237fdbe8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.generated.go @@ -0,0 +1,5218 @@ +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package client + +import ( + "errors" + "fmt" + "reflect" + "runtime" + time "time" + + codec1978 "github.com/ugorji/go/codec" +) + +const ( + // ----- content types ---- + codecSelferC_UTF87612 = 1 + codecSelferC_RAW7612 = 0 + // ----- value types used ---- + codecSelferValueTypeArray7612 = 10 + codecSelferValueTypeMap7612 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey7612 = 2 + codecSelfer_containerMapValue7612 = 3 + codecSelfer_containerMapEnd7612 = 4 + codecSelfer_containerArrayElem7612 = 6 + codecSelfer_containerArrayEnd7612 = 7 +) + +var ( + codecSelferBitsize7612 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr7612 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer7612 struct{} + +func init() { + if codec1978.GenVersion != 8 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 8, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 time.Duration + _ = v0 + } +} + +func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Code)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("errorCode")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Code)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Message)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("message")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Message)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("cause")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("index")) + r.WriteMapElemValue() + yym14 := 
z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *Error) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "errorCode": + if r.TryDecodeAsNil() { + x.Code = 0 + } else { + yyv4 := &x.Code + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int)(yyv4)) = int(r.DecodeInt(codecSelferBitsize7612)) + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "cause": + if r.TryDecodeAsNil() { + x.Cause = "" + } else { + yyv8 := &x.Cause + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv10 := &x.Index + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *Error) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Code = 0 + } else { + yyv13 := &x.Code + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int)(yyv13)) = int(r.DecodeInt(codecSelferBitsize7612)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv15 := &x.Message + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Cause = "" + } else { + yyv17 := &x.Cause + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + 
yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv19 := &x.Index + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*uint64)(yyv19)) = uint64(r.DecodeUint(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x PrevExistType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x)) + } +} + +func (x *PrevExistType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *WatcherOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(2) + } else { + r.WriteMapStart(2) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeUint(uint64(x.AfterIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("AfterIndex")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeUint(uint64(x.AfterIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *WatcherOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *WatcherOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "AfterIndex": + if r.TryDecodeAsNil() { + x.AfterIndex = 0 + } else { + yyv4 := &x.AfterIndex + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*uint64)(yyv4)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv6 := &x.Recursive + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.AfterIndex = 0 + } else { + yyv9 := &x.AfterIndex + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*uint64)(yyv9)) = uint64(r.DecodeUint(64)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv11 := &x.Recursive + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj8-1, "") + } + r.ReadArrayEnd() +} + +func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(1) + } else { + r.WriteMapStart(1) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *CreateInOrderOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv4 := &x.TTL + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv7 := &x.TTL + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj6-1, "") + } + r.ReadArrayEnd() +} + +func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(7) + } else { + r.WriteMapStart(7) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + x.PrevExist.CodecEncodeSelf(e) + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) + r.WriteMapElemValue() + x.PrevExist.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + 
r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Refresh")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) + r.WriteMapElemValue() + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *SetOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv4 := &x.PrevValue + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv6 := &x.PrevIndex + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "PrevExist": + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv8 := &x.PrevExist + yyv8.CodecDecodeSelf(d) + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv9 := &x.TTL + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else { + *((*int64)(yyv9)) = int64(r.DecodeInt(64)) + } + } + case "Refresh": + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv11 := &x.Refresh + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv13 := &x.Dir + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + 
*((*bool)(yyv13)) = r.DecodeBool() + } + } + case "NoValueOnSuccess": + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv15 := &x.NoValueOnSuccess + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *SetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv18 := &x.PrevValue + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv20 := &x.PrevIndex + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*uint64)(yyv20)) = uint64(r.DecodeUint(64)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv22 := &x.PrevExist + yyv22.CodecDecodeSelf(d) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv23 := &x.TTL + yym24 := z.DecBinary() + _ = yym24 + if false { + } else if z.HasExtensions() && z.DecExt(yyv23) { + } else { + *((*int64)(yyv23)) = int64(r.DecodeInt(64)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv25 := &x.Refresh + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv27 := &x.Dir + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv29 := &x.NoValueOnSuccess + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + for { + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj17-1, "") + } + r.ReadArrayEnd() +} + +func (x *GetOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const 
yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(3) + } else { + r.WriteMapStart(3) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Sort)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Sort")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Sort)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Quorum")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *GetOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *GetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv4 := &x.Recursive + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "Sort": + if r.TryDecodeAsNil() { + x.Sort = false + } else { + yyv6 := &x.Sort + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "Quorum": + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv8 := &x.Quorum + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *GetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + 
x.Recursive = false + } else { + yyv11 := &x.Recursive + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Sort = false + } else { + yyv13 := &x.Sort + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv15 := &x.Quorum + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj10-1, "") + } + r.ReadArrayEnd() +} + +func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } 
else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv4 := &x.PrevValue + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv6 := &x.PrevIndex + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv8 := &x.Recursive + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv10 := &x.Dir + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv13 := &x.PrevValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv15 := &x.PrevIndex + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*uint64)(yyv15)) = uint64(r.DecodeUint(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv17 := &x.Recursive + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv19 := &x.Dir + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + 
z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(3) + } else { + r.WriteMapStart(3) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Action)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("action")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Action)) + } + } + var yyn6 bool + if x.Node == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("node")) + r.WriteMapElemValue() + if yyn6 { + r.EncodeNil() + } else { + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + } + var yyn9 bool + if x.PrevNode == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("prevNode")) + r.WriteMapElemValue() + if yyn9 { + r.EncodeNil() + } else { + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "action": + if r.TryDecodeAsNil() { + x.Action = "" + } else { + yyv4 := &x.Action + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "node": + if x.Node == nil { + x.Node = new(Node) + } + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + 
case "prevNode": + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Action = "" + } else { + yyv9 := &x.Action + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + if x.Node == nil { + x.Node = new(Node) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj8-1, "") + } + r.ReadArrayEnd() +} + +func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _ = yyq2 + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Dir != false + yyq2[6] = x.Expiration != nil + yyq2[7] = x.TTL != 0 + if yyr2 || yy2arr2 { + r.WriteArrayStart(8) + } else { + var yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.WriteMapStart(yynn2) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("key")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("dir")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, 
string("value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if x.Nodes == nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("nodes")) + r.WriteMapElemValue() + if x.Nodes == nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("createdIndex")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("modifiedIndex")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } + var yyn21 bool + if x.Expiration == nil { + yyn21 = true + goto LABEL21 + } + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if yyq2[6] { + if x.Expiration == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { + r.EncodeBuiltin(yym23, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym22 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[6] { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("expiration")) + r.WriteMapElemValue() + if yyn21 { + r.EncodeNil() + } else { + if x.Expiration == nil { + r.EncodeNil() + } else { + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { + r.EncodeBuiltin(yym25, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym24 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if yyq2[7] { + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[7] { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("ttl")) + r.WriteMapElemValue() + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := 
r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv6 := &x.Dir + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "nodes": + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv10 := &x.Nodes + yyv10.CodecDecodeSelf(d) + } + case "createdIndex": + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + yyv11 := &x.CreatedIndex + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) + } + } + case "modifiedIndex": + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + yyv13 := &x.ModifiedIndex + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) + } + } + case "expiration": + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { + r.DecodeBuiltin(yym17, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym16 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + case "ttl": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv18 := &x.TTL + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int64)(yyv18)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv21 := &x.Key + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv23 := &x.Dir + yym24 := 
z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv25 := &x.Value + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv27 := &x.Nodes + yyv27.CodecDecodeSelf(d) + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + yyv28 := &x.CreatedIndex + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + yyv30 := &x.ModifiedIndex + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) + } + } + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym33 := z.DecBinary() + _ = yym33 + if false { + } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { + r.DecodeBuiltin(yym34, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym33 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym33 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv35 := &x.TTL + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj20-1, "") + } + r.ReadArrayEnd() +} + +func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encNodes((Nodes)(x), e) + } + } +} + +func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decNodes((*Nodes)(x), d) + } +} + +func (x *httpKeysAPI) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(0) + } else { + r.WriteMapStart(0) + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *httpKeysAPI) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *httpKeysAPI) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj4-1, "") + } + r.ReadArrayEnd() +} + +func (x *httpWatcher) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(0) + } else { + r.WriteMapStart(0) + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *httpWatcher) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // 
default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *httpWatcher) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj4-1, "") + } + r.ReadArrayEnd() +} + +func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(5) + } else { + r.WriteMapStart(5) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Sorted)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Sorted")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Sorted)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Quorum")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *getAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else 
{ + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv8 := &x.Recursive + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "Sorted": + if r.TryDecodeAsNil() { + x.Sorted = false + } else { + yyv10 := &x.Sorted + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "Quorum": + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv12 := &x.Quorum + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv15 := &x.Prefix + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv17 := &x.Key + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv19 := &x.Recursive + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Sorted = false + } else { + yyv21 := &x.Sorted + yym22 := z.DecBinary() + _ = yym22 + if false { + } else 
{ + *((*bool)(yyv21)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv23 := &x.Quorum + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj14-1, "") + } + r.ReadArrayEnd() +} + +func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeUint(uint64(x.WaitIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("WaitIndex")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeUint(uint64(x.WaitIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *waitAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r 
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "WaitIndex": + if r.TryDecodeAsNil() { + x.WaitIndex = 0 + } else { + yyv8 := &x.WaitIndex + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*uint64)(yyv8)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv10 := &x.Recursive + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv13 := &x.Prefix + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv15 := &x.Key + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.WaitIndex = 0 + } else { + yyv17 := &x.WaitIndex + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*uint64)(yyv17)) = uint64(r.DecodeUint(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv19 := &x.Recursive + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(10) + } else { + r.WriteMapStart(10) + } + if yyr2 
|| yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + x.PrevExist.CodecEncodeSelf(e) + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) + r.WriteMapElemValue() + x.PrevExist.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym23 := z.EncBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Refresh")) + r.WriteMapElemValue() + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } else { + r.WriteMapElemKey() + 
r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) + r.WriteMapElemValue() + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *setAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv10 := &x.PrevValue + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv12 := &x.PrevIndex + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) + } + } + case "PrevExist": + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv14 := &x.PrevExist + yyv14.CodecDecodeSelf(d) + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv15 := &x.TTL + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else { + *((*int64)(yyv15)) = int64(r.DecodeInt(64)) + } + } + case "Refresh": + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv17 := &x.Refresh + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv19 := &x.Dir + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + case "NoValueOnSuccess": + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv21 := &x.NoValueOnSuccess + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end 
switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv24 := &x.Prefix + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv26 := &x.Key + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv28 := &x.Value + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv30 := &x.PrevValue + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv32 := &x.PrevIndex + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*uint64)(yyv32)) = uint64(r.DecodeUint(64)) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv34 := &x.PrevExist + yyv34.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv35 := &x.TTL + yym36 := z.DecBinary() + _ = yym36 + if false { + } else if z.HasExtensions() && z.DecExt(yyv35) { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv37 := &x.Refresh + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*bool)(yyv37)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv39 := &x.Dir + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv41 := &x.NoValueOnSuccess + yym42 := z.DecBinary() + _ = yym42 + if false { + } 
else { + *((*bool)(yyv41)) = r.DecodeBool() + } + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj23-1, "") + } + r.ReadArrayEnd() +} + +func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(6) + } else { + r.WriteMapStart(6) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *deleteAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + 
x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv8 := &x.PrevValue + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv10 := &x.PrevIndex + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv12 := &x.Dir + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv14 := &x.Recursive + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv17 := &x.Prefix + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv19 := &x.Key + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv21 := &x.PrevValue + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + 
yyv23 := &x.PrevIndex + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*uint64)(yyv23)) = uint64(r.DecodeUint(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv25 := &x.Dir + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv27 := &x.Recursive + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj16-1, "") + } + r.ReadArrayEnd() +} + +func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *createInOrderAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + 
if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = "" + } else { + yyv6 := &x.Dir + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv10 := &x.TTL + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(yyv10) { + } else { + *((*int64)(yyv10)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv13 := &x.Prefix + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = "" + } else { + yyv15 := &x.Dir + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv17 := &x.Value + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv19 := &x.TTL + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() 
+ } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x codecSelfer7612) encNodes(v Nodes, e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.WriteArrayStart(len(v)) + for _, yyv1 := range v { + r.WriteArrayElem() + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + r.WriteArrayEnd() +} + +func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*Node{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else { + yyhl1 := yyl1 > 0 + var yyrl1 int + _ = yyrl1 + if yyhl1 { + if yyl1 > cap(yyv1) { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*Node, yyrl1) + } + yyc1 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + } + var yyj1 int + // var yydn1 bool + for ; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || r.CheckBreak()); yyj1++ { + if yyj1 == 0 && len(yyv1) == 0 { + if yyhl1 { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + } else { + yyrl1 = 8 + } + yyv1 = make([]*Node, yyrl1) + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + // yydn1 = r.TryDecodeAsNil() + + // if indefinite, etc, then expand the slice if necessary + var yydb1 bool + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) + yyc1 = true + + } + if yydb1 { + z.DecSwallow() + } else { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = Node{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Node) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = make([]*Node, 0) + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } + +} diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go new file mode 100644 index 00000000..8b9fd3f8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.go @@ -0,0 +1,681 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/coreos/etcd/pkg/pathutil" + "github.com/ugorji/go/codec" +) + +const ( + ErrorCodeKeyNotFound = 100 + ErrorCodeTestFailed = 101 + ErrorCodeNotFile = 102 + ErrorCodeNotDir = 104 + ErrorCodeNodeExist = 105 + ErrorCodeRootROnly = 107 + ErrorCodeDirNotEmpty = 108 + ErrorCodeUnauthorized = 110 + + ErrorCodePrevValueRequired = 201 + ErrorCodeTTLNaN = 202 + ErrorCodeIndexNaN = 203 + ErrorCodeInvalidField = 209 + ErrorCodeInvalidForm = 210 + + ErrorCodeRaftInternal = 300 + ErrorCodeLeaderElect = 301 + + ErrorCodeWatcherCleared = 400 + ErrorCodeEventIndexCleared = 401 +) + +type Error struct { + Code int `json:"errorCode"` + Message string `json:"message"` + Cause string `json:"cause"` + Index uint64 `json:"index"` +} + +func (e Error) Error() string { + return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) +} + +var ( + ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") + ErrEmptyBody = errors.New("client: response body is empty") +) + +// PrevExistType is used to define an existence condition when setting +// or deleting Nodes. +type PrevExistType string + +const ( + PrevIgnore = PrevExistType("") + PrevExist = PrevExistType("true") + PrevNoExist = PrevExistType("false") +) + +var ( + defaultV2KeysPrefix = "/v2/keys" +) + +// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value +// API over HTTP. +func NewKeysAPI(c Client) KeysAPI { + return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) +} + +// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller +// to provide a custom base URL path. This should only be used in +// very rare cases. +func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { + return &httpKeysAPI{ + client: c, + prefix: p, + } +} + +type KeysAPI interface { + // Get retrieves a set of Nodes from etcd + Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) + + // Set assigns a new value to a Node identified by a given key. The caller + // may define a set of conditions in the SetOptions. If SetOptions.Dir=true + // then value is ignored. + Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) + + // Delete removes a Node identified by the given key, optionally destroying + // all of its children as well. The caller may define a set of required + // conditions in an DeleteOptions object. + Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) + + // Create is an alias for Set w/ PrevExist=false + Create(ctx context.Context, key, value string) (*Response, error) + + // CreateInOrder is used to atomically create in-order keys within the given directory. + CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) + + // Update is an alias for Set w/ PrevExist=true + Update(ctx context.Context, key, value string) (*Response, error) + + // Watcher builds a new Watcher targeted at a specific Node identified + // by the given key. The Watcher may be configured at creation time + // through a WatcherOptions object. The returned Watcher is designed + // to emit events that happen to a Node, and optionally to its children. 
+ Watcher(key string, opts *WatcherOptions) Watcher +} + +type WatcherOptions struct { + // AfterIndex defines the index after-which the Watcher should + // start emitting events. For example, if a value of 5 is + // provided, the first event will have an index >= 6. + // + // Setting AfterIndex to 0 (default) means that the Watcher + // should start watching for events starting at the current + // index, whatever that may be. + AfterIndex uint64 + + // Recursive specifies whether or not the Watcher should emit + // events that occur in children of the given keyspace. If set + // to false (default), events will be limited to those that + // occur for the exact key. + Recursive bool +} + +type CreateInOrderOptions struct { + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration +} + +type SetOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Set operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. + // + // PrevValue is ignored if Dir=true + PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Set operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // PrevExist specifies whether the Node must currently exist + // (PrevExist) or not (PrevNoExist). If the caller does not + // care about existence, set PrevExist to PrevIgnore, or simply + // leave it unset. + PrevExist PrevExistType + + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration + + // Refresh set to true means a TTL value can be updated + // without firing a watch or changing the node value. A + // value must not be provided when refreshing a key. + Refresh bool + + // Dir specifies whether or not this Node should be created as a directory. + Dir bool + + // NoValueOnSuccess specifies whether the response contains the current value of the Node. + // If set, the response will only contain the current value when the request fails. + NoValueOnSuccess bool +} + +type GetOptions struct { + // Recursive defines whether or not all children of the Node + // should be returned. + Recursive bool + + // Sort instructs the server whether or not to sort the Nodes. + // If true, the Nodes are sorted alphabetically by key in + // ascending order (A to z). If false (default), the Nodes will + // not be sorted and the ordering used should not be considered + // predictable. + Sort bool + + // Quorum specifies whether it gets the latest committed value that + // has been applied in quorum of members, which ensures external + // consistency (or linearizability). + Quorum bool +} + +type DeleteOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Delete operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. 
+ PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Delete operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // Recursive defines whether or not all children of the Node + // should be deleted. If set to true, all children of the Node + // identified by the given key will be deleted. If left unset + // or explicitly set to false, only a single Node will be + // deleted. + Recursive bool + + // Dir specifies whether or not this Node should be removed as a directory. + Dir bool +} + +type Watcher interface { + // Next blocks until an etcd event occurs, then returns a Response + // representing that event. The behavior of Next depends on the + // WatcherOptions used to construct the Watcher. Next is designed to + // be called repeatedly, each time blocking until a subsequent event + // is available. + // + // If the provided context is cancelled, Next will return a non-nil + // error. Any other failures encountered while waiting for the next + // event (connection issues, deserialization failures, etc) will + // also result in a non-nil error. + Next(context.Context) (*Response, error) +} + +type Response struct { + // Action is the name of the operation that occurred. Possible values + // include get, set, delete, update, create, compareAndSwap, + // compareAndDelete and expire. + Action string `json:"action"` + + // Node represents the state of the relevant etcd Node. + Node *Node `json:"node"` + + // PrevNode represents the previous state of the Node. PrevNode is non-nil + // only if the Node existed before the action occurred and the action + // caused a change to the Node. + PrevNode *Node `json:"prevNode"` + + // Index holds the cluster-level index at the time the Response was generated. + // This index is not tied to the Node(s) contained in this Response. + Index uint64 `json:"-"` + + // ClusterID holds the cluster-level ID reported by the server. This + // should be different for different etcd clusters. + ClusterID string `json:"-"` +} + +type Node struct { + // Key represents the unique location of this Node (e.g. "/foo/bar"). + Key string `json:"key"` + + // Dir reports whether node describes a directory. + Dir bool `json:"dir,omitempty"` + + // Value is the current data stored on this Node. If this Node + // is a directory, Value will be empty. + Value string `json:"value"` + + // Nodes holds the children of this Node, only if this Node is a directory. + // This slice of will be arbitrarily deep (children, grandchildren, great- + // grandchildren, etc.) if a recursive Get or Watch request were made. + Nodes Nodes `json:"nodes"` + + // CreatedIndex is the etcd index at-which this Node was created. + CreatedIndex uint64 `json:"createdIndex"` + + // ModifiedIndex is the etcd index at-which this Node was last modified. + ModifiedIndex uint64 `json:"modifiedIndex"` + + // Expiration is the server side expiration time of the key. + Expiration *time.Time `json:"expiration,omitempty"` + + // TTL is the time to live of the key in second. 
+ TTL int64 `json:"ttl,omitempty"` +} + +func (n *Node) String() string { + return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) +} + +// TTLDuration returns the Node's TTL as a time.Duration object +func (n *Node) TTLDuration() time.Duration { + return time.Duration(n.TTL) * time.Second +} + +type Nodes []*Node + +// interfaces for sorting + +func (ns Nodes) Len() int { return len(ns) } +func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } +func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } + +type httpKeysAPI struct { + client httpClient + prefix string +} + +func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { + act := &setAction{ + Prefix: k.prefix, + Key: key, + Value: val, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.PrevExist = opts.PrevExist + act.TTL = opts.TTL + act.Refresh = opts.Refresh + act.Dir = opts.Dir + act.NoValueOnSuccess = opts.NoValueOnSuccess + } + + doCtx := ctx + if act.PrevExist == PrevNoExist { + doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) + } + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) +} + +func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { + act := &createInOrderAction{ + Prefix: k.prefix, + Dir: dir, + Value: val, + } + + if opts != nil { + act.TTL = opts.TTL + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) +} + +func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { + act := &deleteAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.Dir = opts.Dir + act.Recursive = opts.Recursive + } + + doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { + act := &getAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + act.Sorted = opts.Sort + act.Quorum = opts.Quorum + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { + act := waitAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + if opts.AfterIndex > 0 { + act.WaitIndex = opts.AfterIndex + 1 + } + } + + return &httpWatcher{ + client: k.client, + nextWait: act, + } +} + +type httpWatcher struct { + client httpClient + nextWait waitAction +} + +func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { + for { + 
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) + if err != nil { + return nil, err + } + + resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) + if err != nil { + if err == ErrEmptyBody { + continue + } + return nil, err + } + + hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 + return resp, nil + } +} + +// v2KeysURL forms a URL representing the location of a key. +// The endpoint argument represents the base URL of an etcd +// server. The prefix is the path needed to route from the +// provided endpoint's path to the root of the keys API +// (typically "/v2/keys"). +func v2KeysURL(ep url.URL, prefix, key string) *url.URL { + // We concatenate all parts together manually. We cannot use + // path.Join because it does not reserve trailing slash. + // We call CanonicalURLPath to further cleanup the path. + if prefix != "" && prefix[0] != '/' { + prefix = "/" + prefix + } + if key != "" && key[0] != '/' { + key = "/" + key + } + ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) + return &ep +} + +type getAction struct { + Prefix string + Key string + Recursive bool + Sorted bool + Quorum bool +} + +func (g *getAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, g.Prefix, g.Key) + + params := u.Query() + params.Set("recursive", strconv.FormatBool(g.Recursive)) + params.Set("sorted", strconv.FormatBool(g.Sorted)) + params.Set("quorum", strconv.FormatBool(g.Quorum)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type waitAction struct { + Prefix string + Key string + WaitIndex uint64 + Recursive bool +} + +func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, w.Prefix, w.Key) + + params := u.Query() + params.Set("wait", "true") + params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) + params.Set("recursive", strconv.FormatBool(w.Recursive)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type setAction struct { + Prefix string + Key string + Value string + PrevValue string + PrevIndex uint64 + PrevExist PrevExistType + TTL time.Duration + Refresh bool + Dir bool + NoValueOnSuccess bool +} + +func (a *setAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + form := url.Values{} + + // we're either creating a directory or setting a key + if a.Dir { + params.Set("dir", strconv.FormatBool(a.Dir)) + } else { + // These options are only valid for setting a key + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + form.Add("value", a.Value) + } + + // Options which apply to both setting a key and creating a dir + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.PrevExist != PrevIgnore { + params.Set("prevExist", string(a.PrevExist)) + } + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + + if a.Refresh { + form.Add("refresh", "true") + } + if a.NoValueOnSuccess { + params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) + } + + u.RawQuery = params.Encode() + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("PUT", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type deleteAction struct { + Prefix string + Key string + PrevValue string + PrevIndex uint64 + Dir bool + Recursive bool +} + +func (a *deleteAction) HTTPRequest(ep url.URL) 
*http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.Dir { + params.Set("dir", "true") + } + if a.Recursive { + params.Set("recursive", "true") + } + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("DELETE", u.String(), nil) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type createInOrderAction struct { + Prefix string + Dir string + Value string + TTL time.Duration +} + +func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Dir) + + form := url.Values{} + form.Add("value", a.Value) + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("POST", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return req +} + +func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { + switch code { + case http.StatusOK, http.StatusCreated: + if len(body) == 0 { + return nil, ErrEmptyBody + } + res, err = unmarshalSuccessfulKeysResponse(header, body) + default: + err = unmarshalFailedKeysResponse(body) + } + return res, err +} + +func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { + var res Response + err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) + if err != nil { + return nil, ErrInvalidJSON + } + if header.Get("X-Etcd-Index") != "" { + res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) + if err != nil { + return nil, err + } + } + res.ClusterID = header.Get("X-Etcd-Cluster-ID") + return &res, nil +} + +func unmarshalFailedKeysResponse(body []byte) error { + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return ErrInvalidJSON + } + return etcdErr +} diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go new file mode 100644 index 00000000..aafa3d1b --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/members.go @@ -0,0 +1,303 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + defaultV2MembersPrefix = "/v2/members" + defaultLeaderSuffix = "/leader" +) + +type Member struct { + // ID is the unique identifier of this Member. + ID string `json:"id"` + + // Name is a human-readable, non-unique identifier of this Member. + Name string `json:"name"` + + // PeerURLs represents the HTTP(S) endpoints this Member uses to + // participate in etcd's consensus protocol. 
+ PeerURLs []string `json:"peerURLs"` + + // ClientURLs represents the HTTP(S) endpoints on which this Member + // serves its client-facing APIs. + ClientURLs []string `json:"clientURLs"` +} + +type memberCollection []Member + +func (c *memberCollection) UnmarshalJSON(data []byte) error { + d := struct { + Members []Member + }{} + + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + if d.Members == nil { + *c = make([]Member, 0) + return nil + } + + *c = d.Members + return nil +} + +type memberCreateOrUpdateRequest struct { + PeerURLs types.URLs +} + +func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { + s := struct { + PeerURLs []string `json:"peerURLs"` + }{ + PeerURLs: make([]string, len(m.PeerURLs)), + } + + for i, u := range m.PeerURLs { + s.PeerURLs[i] = u.String() + } + + return json.Marshal(&s) +} + +// NewMembersAPI constructs a new MembersAPI that uses HTTP to +// interact with etcd's membership API. +func NewMembersAPI(c Client) MembersAPI { + return &httpMembersAPI{ + client: c, + } +} + +type MembersAPI interface { + // List enumerates the current cluster membership. + List(ctx context.Context) ([]Member, error) + + // Add instructs etcd to accept a new Member into the cluster. + Add(ctx context.Context, peerURL string) (*Member, error) + + // Remove demotes an existing Member out of the cluster. + Remove(ctx context.Context, mID string) error + + // Update instructs etcd to update an existing Member in the cluster. + Update(ctx context.Context, mID string, peerURLs []string) error + + // Leader gets current leader of the cluster + Leader(ctx context.Context) (*Member, error) +} + +type httpMembersAPI struct { + client httpClient +} + +func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { + req := &membersAPIActionList{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var mCollection memberCollection + if err := json.Unmarshal(body, &mCollection); err != nil { + return nil, err + } + + return []Member(mCollection), nil +} + +func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { + urls, err := types.NewURLs([]string{peerURL}) + if err != nil { + return nil, err + } + + req := &membersAPIActionAdd{peerURLs: urls} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusCreated { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return nil, err + } + return nil, merr + } + + var memb Member + if err := json.Unmarshal(body, &memb); err != nil { + return nil, err + } + + return &memb, nil +} + +func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { + urls, err := types.NewURLs(peerURLs) + if err != nil { + return err + } + + req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return err + } + return merr + } + + return nil +} + +func (m 
*httpMembersAPI) Remove(ctx context.Context, memberID string) error { + req := &membersAPIActionRemove{memberID: memberID} + resp, _, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) +} + +func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { + req := &membersAPIActionLeader{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var leader Member + if err := json.Unmarshal(body, &leader); err != nil { + return nil, err + } + + return &leader, nil +} + +type membersAPIActionList struct{} + +func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type membersAPIActionRemove struct { + memberID string +} + +func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, d.memberID) + req, _ := http.NewRequest("DELETE", u.String(), nil) + return req +} + +type membersAPIActionAdd struct { + peerURLs types.URLs +} + +func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +type membersAPIActionUpdate struct { + memberID string + peerURLs types.URLs +} + +func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + u.Path = path.Join(u.Path, a.memberID) + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +func assertStatusCode(got int, want ...int) (err error) { + for _, w := range want { + if w == got { + return nil + } + } + return fmt.Errorf("unexpected status code %d", got) +} + +type membersAPIActionLeader struct{} + +func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, defaultLeaderSuffix) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +// v2MembersURL add the necessary path to the provided endpoint +// to route requests to the default v2 members API. +func v2MembersURL(ep url.URL) *url.URL { + ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) + return &ep +} + +type membersError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e membersError) Error() string { + return e.Message +} diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go new file mode 100644 index 00000000..15a8babf --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/util.go @@ -0,0 +1,53 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "regexp" +) + +var ( + roleNotFoundRegExp *regexp.Regexp + userNotFoundRegExp *regexp.Regexp +) + +func init() { + roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") + userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") +} + +// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. +func IsKeyNotFound(err error) bool { + if cErr, ok := err.(Error); ok { + return cErr.Code == ErrorCodeKeyNotFound + } + return false +} + +// IsRoleNotFound returns true if the error means role not found of v2 API. +func IsRoleNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return roleNotFoundRegExp.MatchString(ae.Message) + } + return false +} + +// IsUserNotFound returns true if the error means user not found of v2 API. +func IsUserNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return userNotFoundRegExp.MatchString(ae.Message) + } + return false +} diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/LICENSE b/vendor/github.com/coreos/etcd/pkg/pathutil/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/pathutil/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go new file mode 100644 index 00000000..f26254ba --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pathutil implements utility functions for handling slash-separated +// paths. +package pathutil + +import "path" + +// CanonicalURLPath returns the canonical url path for p, which follows the rules: +// 1. the path always starts with "/" +// 2. replace multiple slashes with a single slash +// 3. replace each '.' '..' path name element with equivalent one +// 4. keep the trailing slash +// The function is borrowed from stdlib http.cleanPath in server.go. 
+func CanonicalURLPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root, + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} diff --git a/vendor/github.com/coreos/etcd/pkg/srv/LICENSE b/vendor/github.com/coreos/etcd/pkg/srv/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/github.com/coreos/etcd/pkg/srv/srv.go new file mode 100644 index 00000000..e1df5254 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/srv.go @@ -0,0 +1,130 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package srv looks up DNS SRV records. +package srv + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict + resolveTCPAddr = net.ResolveTCPAddr +) + +// GetCluster gets the cluster information via DNS discovery. +// Also sees each entry as a separate instance. +func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + return nil, err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, terr := resolveTCPAddr("tcp", host) + if terr != nil { + err = terr + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but URL shouldn't. 
+ shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + if ok && url.Scheme != scheme { + err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } else { + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + } + } + if len(stringParts) == 0 { + return err + } + return nil + } + + err := updateNodeMap(service, serviceScheme) + if err != nil { + return nil, fmt.Errorf("error querying DNS SRV records for _%s %s", service, err) + } + return stringParts, nil +} + +type SRVClients struct { + Endpoints []string + SRVs []*net.SRV +} + +// GetClient looks up the client endpoints for a service and domain. +func GetClient(service, domain string) (*SRVClients, error) { + var urls []*url.URL + var srvs []*net.SRV + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + srvs = append(srvs, addrs...) + return nil + } + + errHTTPS := updateURLs(service+"-ssl", "https") + errHTTP := updateURLs(service, "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/LICENSE b/vendor/github.com/coreos/etcd/pkg/types/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go new file mode 100644 index 00000000..de8ef0bd --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types declares various data types and implements type-checking +// functions. 
+package types diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go new file mode 100644 index 00000000..1b042d9c --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/id.go @@ -0,0 +1,41 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strconv" +) + +// ID represents a generic identifier which is canonically +// stored as a uint64 but is typically represented as a +// base-16 string for input/output +type ID uint64 + +func (i ID) String() string { + return strconv.FormatUint(uint64(i), 16) +} + +// IDFromString attempts to create an ID from a base-16 string. +func IDFromString(s string) (ID, error) { + i, err := strconv.ParseUint(s, 16, 64) + return ID(i), err +} + +// IDSlice implements the sort interface +type IDSlice []ID + +func (p IDSlice) Len() int { return len(p) } +func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } +func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go new file mode 100644 index 00000000..c111b0c0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/set.go @@ -0,0 +1,178 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "sort" + "sync" +) + +type Set interface { + Add(string) + Remove(string) + Contains(string) bool + Equals(Set) bool + Length() int + Values() []string + Copy() Set + Sub(Set) Set +} + +func NewUnsafeSet(values ...string) *unsafeSet { + set := &unsafeSet{make(map[string]struct{})} + for _, v := range values { + set.Add(v) + } + return set +} + +func NewThreadsafeSet(values ...string) *tsafeSet { + us := NewUnsafeSet(values...) 
+ return &tsafeSet{us, sync.RWMutex{}} +} + +type unsafeSet struct { + d map[string]struct{} +} + +// Add adds a new value to the set (no-op if the value is already present) +func (us *unsafeSet) Add(value string) { + us.d[value] = struct{}{} +} + +// Remove removes the given value from the set +func (us *unsafeSet) Remove(value string) { + delete(us.d, value) +} + +// Contains returns whether the set contains the given value +func (us *unsafeSet) Contains(value string) (exists bool) { + _, exists = us.d[value] + return exists +} + +// ContainsAll returns whether the set contains all given values +func (us *unsafeSet) ContainsAll(values []string) bool { + for _, s := range values { + if !us.Contains(s) { + return false + } + } + return true +} + +// Equals returns whether the contents of two sets are identical +func (us *unsafeSet) Equals(other Set) bool { + v1 := sort.StringSlice(us.Values()) + v2 := sort.StringSlice(other.Values()) + v1.Sort() + v2.Sort() + return reflect.DeepEqual(v1, v2) +} + +// Length returns the number of elements in the set +func (us *unsafeSet) Length() int { + return len(us.d) +} + +// Values returns the values of the Set in an unspecified order. +func (us *unsafeSet) Values() (values []string) { + values = make([]string, 0) + for val := range us.d { + values = append(values, val) + } + return values +} + +// Copy creates a new Set containing the values of the first +func (us *unsafeSet) Copy() Set { + cp := NewUnsafeSet() + for val := range us.d { + cp.Add(val) + } + + return cp +} + +// Sub removes all elements in other from the set +func (us *unsafeSet) Sub(other Set) Set { + oValues := other.Values() + result := us.Copy().(*unsafeSet) + + for _, val := range oValues { + if _, ok := result.d[val]; !ok { + continue + } + delete(result.d, val) + } + + return result +} + +type tsafeSet struct { + us *unsafeSet + m sync.RWMutex +} + +func (ts *tsafeSet) Add(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Add(value) +} + +func (ts *tsafeSet) Remove(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Remove(value) +} + +func (ts *tsafeSet) Contains(value string) (exists bool) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Contains(value) +} + +func (ts *tsafeSet) Equals(other Set) bool { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Equals(other) +} + +func (ts *tsafeSet) Length() int { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Length() +} + +func (ts *tsafeSet) Values() (values []string) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Values() +} + +func (ts *tsafeSet) Copy() Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Copy().(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} + +func (ts *tsafeSet) Sub(other Set) Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Sub(other).(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go new file mode 100644 index 00000000..0dd9ca79 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/slice.go @@ -0,0 +1,22 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// Uint64Slice implements sort interface +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go new file mode 100644 index 00000000..9e5d03ff --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urls.go @@ -0,0 +1,82 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + "net" + "net/url" + "sort" + "strings" +) + +type URLs []url.URL + +func NewURLs(strs []string) (URLs, error) { + all := make([]url.URL, len(strs)) + if len(all) == 0 { + return nil, errors.New("no valid URLs given") + } + for i, in := range strs { + in = strings.TrimSpace(in) + u, err := url.Parse(in) + if err != nil { + return nil, err + } + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) + } + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) + } + all[i] = *u + } + us := URLs(all) + us.Sort() + + return us, nil +} + +func MustNewURLs(strs []string) URLs { + urls, err := NewURLs(strs) + if err != nil { + panic(err) + } + return urls +} + +func (us URLs) String() string { + return strings.Join(us.StringSlice(), ",") +} + +func (us *URLs) Sort() { + sort.Sort(us) +} +func (us URLs) Len() int { return len(us) } +func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } +func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } + +func (us URLs) StringSlice() []string { + out := make([]string, len(us)) + for i := range us { + out[i] = us[i].String() + } + + return out +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go new file mode 100644 index 00000000..47690cc3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go @@ -0,0 +1,107 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "sort" + "strings" +) + +// URLsMap is a map from a name to its URLs. +type URLsMap map[string]URLs + +// NewURLsMap returns a URLsMap instantiated from the given string, +// which consists of discovery-formatted names-to-URLs, like: +// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 +func NewURLsMap(s string) (URLsMap, error) { + m := parse(s) + + cl := URLsMap{} + for name, urls := range m { + us, err := NewURLs(urls) + if err != nil { + return nil, err + } + cl[name] = us + } + return cl, nil +} + +// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The +// string values in the map can be multiple values separated by the sep string. +func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { + var err error + um := URLsMap{} + for k, v := range m { + um[k], err = NewURLs(strings.Split(v, sep)) + if err != nil { + return nil, err + } + } + return um, nil +} + +// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. +func (c URLsMap) String() string { + var pairs []string + for name, urls := range c { + for _, url := range urls { + pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) + } + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +// URLs returns a list of all URLs. +// The returned list is sorted in ascending lexicographical order. +func (c URLsMap) URLs() []string { + var urls []string + for _, us := range c { + for _, u := range us { + urls = append(urls, u.String()) + } + } + sort.Strings(urls) + return urls +} + +// Len returns the size of URLsMap. +func (c URLsMap) Len() int { + return len(c) +} + +// parse parses the given string and returns a map listing the values specified for each key. +func parse(s string) map[string][]string { + m := make(map[string][]string) + for s != "" { + key := s + if i := strings.IndexAny(key, ","); i >= 0 { + key, s = key[:i], key[i+1:] + } else { + s = "" + } + if key == "" { + continue + } + value := "" + if i := strings.Index(key, "="); i >= 0 { + key, value = key[:i], key[i+1:] + } + m[key] = append(m[key], value) + } + return m +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/LICENSE b/vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 00000000..110fc23e --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,268 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. 
+func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. +func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. + if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 00000000..e256b41a --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/0doc.go new file mode 100644 index 00000000..cfb804ec --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/0doc.go @@ -0,0 +1,185 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library for +binc, msgpack, cbor, json + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +To install: + + go get github.com/ugorji/go/codec + +This package will carefully use 'unsafe' for performance reasons in specific places. +You can build without unsafe use by passing the safe or appengine tag +i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3 +go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from +go 1.7+ . This is because supporting unsafe requires knowledge of implementation details. + +For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Support for go1.4 and above, while selectively using newer APIs for later releases + - Good code coverage ( > 70% ) + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Careful selected use of 'unsafe' for targeted performance gains. + 100% mode exists where 'unsafe' is not used at all. + - Lock-free (sans mutex) concurrency for scaling to 100's of cores + - Multiple conversions: + Package coerces types where appropriate + e.g. decode an int in the stream into a float, etc. + - Corner Cases: + Overflows, nil maps/slices, nil values in streams are handled correctly + - Standard field renaming via tags + - Support for omitting empty fields during an encoding + - Encoding from any value and decoding into pointer to any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Extensions to support efficient encoding/decoding of any named types + - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces + - Decoding without a schema (into a interface{}). 
+ Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Encode a struct as an array, and decode struct from an array in the data stream + - Comprehensive support for anonymous fields + - Fast (no-reflection) encoding/decoding of common maps and slices + - Code-generation for faster performance. + - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats + - Support indefinite-length formats to enable true streaming + (for formats which support it e.g. json, cbor) + - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. + This mostly applies to maps, where iteration order is non-deterministic. + - NIL in data stream decoded as zero value + - Never silently skip data when decoding. + User decides whether to return an error or silently skip data when keys or indexes + in the data stream do not map to fields in the struct. + - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) + - Encode/Decode from/to chan types (for iterative streaming support) + - Drop-in replacement for encoding/json. `json:` key in struct tag supported. + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Handle unique idiosyncrasies of codecs e.g. + - For messagepack, configure how ambiguities in handling raw bytes are resolved + - For messagepack, provide rpc server/client codec to support + msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. + +The Encoder and Decoder are NOT safe for concurrent use. + +Consequently, the usage model is basically: + + - Create and initialize the Handle before any use. + Once created, DO NOT modify it. + - Multiple Encoders or Decoders can now use the Handle concurrently. + They only read information off the Handle (never write). + - However, each Encoder or Decoder MUST not be used concurrently + - To re-use an Encoder/Decoder, call Reset(...) on it first. + This allows you use state maintained on the Encoder/Decoder. + +Sample usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. 
for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +Running Tests + +To run tests, use the following: + + go test + +To run the full suite of tests, use the following: + + go test -tags alltests -run Suite + +You can run the tag 'safe' to run tests or build in safe mode. e.g. + + go test -tags safe -run Json + go test -tags "alltests safe" -run Suite + +Running Benchmarks + +Please see http://github.com/ugorji/go-codec-bench . + +*/ +package codec + diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/LICENSE b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/binc.go new file mode 100644 index 00000000..be5b7d33 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/binc.go @@ -0,0 +1,946 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" + "time" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
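The bincDoPrune constant above gates the byte-pruning used further down in this file: encIntegerPrune drops leading zero (or sign-extension) bytes from big-endian integers, and EncodeFloat64 drops trailing zero bytes from the float bits, recording the reduced length in the descriptor. A minimal standalone sketch of the unsigned-integer case (illustrative only, not part of the vendored file; trimLeadingZeros is a hypothetical helper name):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // trimLeadingZeros returns the big-endian bytes of v with leading zero
    // bytes removed -- the essence of binc's integer pruning. The vendored
    // encIntegerPrune does this inline (pruneSignExt also handles sign bytes
    // of negative values, which this sketch does not cover).
    func trimLeadingZeros(v uint64) []byte {
        var b [8]byte
        binary.BigEndian.PutUint64(b[:], v)
        i := 0
        for i < 7 && b[i] == 0 {
            i++
        }
        return b[i:]
    }

    func main() {
        fmt.Printf("%% x\n", trimLeadingZeros(0x2A))      // 2a
        fmt.Printf("%% x\n", trimLeadingZeros(0x1234))    // 12 34
        fmt.Printf("%% x\n", trimLeadingZeros(1<<40 | 5)) // 01 00 00 00 00 05
    }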
+ +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +type bincEncDriver struct { + e *Encoder + w encWriter + m map[string]uint16 // symbols + b [scratchByteArrayLen]byte + s uint16 // symbols sequencer + // encNoSeparator + encDriverNoopContainerWriter +} + +func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) { + if rt == timeTypId { + var bs []byte + switch x := v.(type) { + case time.Time: + bs = encodeTime(x) + case *time.Time: + bs = encodeTime(*x) + default: + e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v) + } + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) EncodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) EncodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) EncodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + bigen.PutUint64(e.b[:8], math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:8]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) EncodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + if v >= 0 { + e.encUint(bincVdPosInt<<4, true, uint64(v)) + } else if v == -1 { + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + } else { + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) EncodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + if v == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + } else if pos && v >= 1 && v <= 16 { + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + } else if v <= math.MaxUint8 { + e.w.writen2(bd|0x0, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.encIntegerPrune(bd, pos, v, 4) + } else { + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) EncodeExt(rv 
interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) WriteArrayStart(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriver) WriteMapStart(length int) { + e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriver) EncodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(c_UTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). + + l := len(v) + if l == 0 { + e.encBytesLen(c_UTF8, 0) + return + } else if l == 1 { + e.encBytesLen(c_UTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + } else { + e.s++ + ui = e.s + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + if l <= math.MaxUint8 { + // lenprec = 0 + } else if l <= math.MaxUint16 { + lenprec = 1 + } else if int64(l) <= math.MaxUint32 { + lenprec = 2 + } else { + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + if lenprec == 0 { + e.w.writen1(byte(l)) + } else if lenprec == 1 { + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) + } else if lenprec == 2 { + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) + } else { + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == c_RAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + if v <= math.MaxUint8 { + e.w.writen2(bd, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd | 0x02) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { + e.w.writen1(bd | 0x03) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecSymbol struct { + s string + b []byte + i uint16 +} + +type bincDecDriver struct { + d *Decoder + h *BincHandle + r decReader + br bool // bytes reader + bdRead bool + bd byte + vd byte + vs byte + // 
noStreamingCodec + // decNoSeparator + b [scratchByteArrayLen]byte + + // linear searching on this slice is ok, + // because we typically expect < 32 symbols in each stream. + s []bincDecSymbol + decDriverNoopContainerReader +} + +func (d *bincDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true +} + +func (d *bincDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *bincDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdSpecial && d.vs == bincSpNil { + return valueTypeNil + } else if d.vd == bincVdByteArray { + return valueTypeBytes + } else if d.vd == bincVdString { + return valueTypeString + } else if d.vd == bincVdArray { + return valueTypeArray + } else if d.vd == bincVdMap { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *bincDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) { + if !d.bdRead { + d.readNextBd() + } + if rt == timeTypId { + if d.vd != bincVdTimestamp { + d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) + return + } + tt, err := decodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt + d.bdRead = false + } +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l) + return + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } + if x := d.vs & 0x7; x == bincFlBin32 { + d.decFloatPre(d.vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + } else if x == bincFlBin64 { + d.decFloatPre(d.vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + } else { + d.d.errorf("only float32 and float64 are supported. 
d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + return + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 3: + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:8]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:8])) + case 7: + d.r.readb(d.b[:8]) + v = uint64(bigen.Uint64(d.b[:8])) + default: + d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") + return + } + return +} + +func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdPosInt { + ui = d.decUint() + } else if vd == bincVdNegInt { + ui = d.decUint() + neg = true + } else if vd == bincVdSmallInt { + ui = uint64(d.vs) + 1 + } else if vd == bincVdSpecial { + if vs == bincSpZero { + //i = 0 + } else if vs == bincSpNegOne { + neg = true + ui = 1 + } else { + d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs) + return + } + } else { + d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + return + } + return +} + +func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { + ui, neg := d.decCheckInteger() + i, overflow := chkOvf.SignedInt(ui) + if overflow { + d.d.errorf("simple: overflow converting %v to signed integer", ui) + return + } + if neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("binc: overflow integer: %v for num bits: %v", i, bitsize) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("binc: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdSpecial { + d.bdRead = false + if vs == bincSpNan { + return math.NaN() + } else if vs == bincSpPosInf { + return math.Inf(1) + } else if vs == bincSpZeroFloat || vs == bincSpZero { + return + } else if vs == bincSpNegInf { + return math.Inf(-1) + } else { + d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + return + } + } else if vd == bincVdFloat { + f = d.decFloat() + } else { + f = float64(d.DecodeInt(64)) + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("binc: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { + // b = false + } else if bd == (bincVdSpecial | bincSpTrue) { + b = true + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + if d.vd != bincVdMap { + d.d.errorf("Invalid d.vd for map. Expecting 0x%x. 
Got: 0x%x", bincVdMap, d.vd) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + if d.vd != bincVdArray { + d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs > 3 { + return int(d.vs - 4) + } + return int(d.decLenNumber()) +} + +func (d *bincDecDriver) decLenNumber() (v uint64) { + if x := d.vs; x == 0 { + v = uint64(d.r.readn1()) + } else if x == 1 { + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + } else if x == 2 { + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + } else { + d.r.readb(d.b[:8]) + v = bigen.Uint64(d.b[:8]) + } + return +} + +func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return + } + var slen int = -1 + // var ok bool + switch d.vd { + case bincVdString, bincVdByteArray: + slen = d.decLen() + if zerocopy { + if d.br { + bs2 = d.r.readx(slen) + } else if len(bs) == 0 { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:]) + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + if withString { + s = string(bs2) + } + case bincVdSymbol: + // zerocopy doesn't apply for symbols, + // as the values must be stored in a table for later use. + // + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint16 + vs := d.vs + if vs&0x8 == 0 { + symbol = uint16(d.r.readn1()) + } else { + symbol = uint16(bigen.Uint16(d.r.readx(2))) + } + if d.s == nil { + d.s = make([]bincDecSymbol, 0, 16) + } + + if vs&0x4 == 0 { + for i := range d.s { + j := &d.s[i] + if j.i == symbol { + bs2 = j.b + if withString { + if j.s == "" && bs2 != nil { + j.s = string(bs2) + } + s = j.s + } + break + } + } + } else { + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(bigen.Uint16(d.r.readx(2))) + case 2: + slen = int(bigen.Uint32(d.r.readx(4))) + case 3: + slen = int(bigen.Uint64(d.r.readx(8))) + } + // since using symbols, do not store any part of + // the parameter bs in the map, as it might be a shared buffer. + // bs2 = decByteSlice(d.r, slen, bs) + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil) + if withString { + s = string(bs2) + } + d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) + } + default: + d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeString() (s string) { + // DecodeBytes does not accommodate symbols, whose impl stores string version in map. + // Use decStringAndBytes directly. 
+ // return string(d.DecodeBytes(d.b[:], true, true)) + _, s = d.decStringAndBytes(d.b[:], true, true) + return +} + +func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) { + s, _ = d.decStringAndBytes(d.b[:], false, true) + return +} + +func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return nil + } + var clen int + if d.vd == bincVdString || d.vd == bincVdByteArray { + clen = d.decLen() + } else { + d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + return + } + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdCustomExt { + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + } else if d.vd == bincVdByteArray { + xbs = d.DecodeBytes(nil, true) + } else { + d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.vd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.n + var decodeFurther bool + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + n.v = valueTypeNil + case bincSpFalse: + n.v = valueTypeBool + n.b = false + case bincSpTrue: + n.v = valueTypeBool + n.b = true + case bincSpNan: + n.v = valueTypeFloat + n.f = math.NaN() + case bincSpPosInf: + n.v = valueTypeFloat + n.f = math.Inf(1) + case bincSpNegInf: + n.v = valueTypeFloat + n.f = math.Inf(-1) + case bincSpZeroFloat: + n.v = valueTypeFloat + n.f = float64(0) + case bincSpZero: + n.v = valueTypeUint + n.u = uint64(0) // int8(0) + case bincSpNegOne: + n.v = valueTypeInt + n.i = int64(-1) // int8(-1) + default: + d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + n.v = valueTypeUint + n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + n.v = valueTypeUint + n.u = d.decUint() + case bincVdNegInt: + n.v = valueTypeInt + n.i = -(int64(d.decUint())) + case bincVdFloat: + n.v = valueTypeFloat + n.f = d.decFloat() + case bincVdSymbol: + n.v = valueTypeSymbol + n.s = d.DecodeString() + case bincVdString: + n.v = valueTypeString + n.s = d.DecodeString() + case bincVdByteArray: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case bincVdTimestamp: + n.v = valueTypeTimestamp + tt, err := decodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + n.t = tt + case bincVdCustomExt: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case bincVdArray: + n.v = valueTypeArray + decodeFurther = true + case bincVdMap: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + } + + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. +// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +// +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
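BincHandle (declared next) is used like any other Handle in this package: configure it once, then share it read-only across encoders and decoders, as described in the 0doc.go usage model earlier in this patch. A minimal round-trip sketch under that model (illustrative only, not part of the vendored files), assuming the package is imported as github.com/ugorji/go/codec:

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    func main() {
        // Create and configure the Handle before any use; treat it as
        // read-only afterwards.
        var bh codec.BincHandle

        type point struct{ X, Y int }

        // Encode a value into a byte slice using the Binc format.
        var b []byte
        if err := codec.NewEncoderBytes(&b, &bh).Encode(point{X: 1, Y: 2}); err != nil {
            panic(err)
        }

        // Decode it back from the same bytes.
        var p point
        if err := codec.NewDecoderBytes(b, &bh).Decode(&p); err != nil {
            panic(err)
        }
        fmt.Println(p) // {1 2}
    }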
+type BincHandle struct { + BasicHandle + binaryEncodingType + noElemSeparators +} + +func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *BincHandle) newEncDriver(e *Encoder) encDriver { + return &bincEncDriver{e: e, w: e.w} +} + +func (h *BincHandle) newDecDriver(d *Decoder) decDriver { + return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (_ *BincHandle) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) reset() { + e.w = e.e.w + e.s = 0 + e.m = nil +} + +func (d *bincDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.s = nil + d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 +} + +var _ decDriver = (*bincDecDriver)(nil) +var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/cbor.go new file mode 100644 index 00000000..592cee85 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/cbor.go @@ -0,0 +1,631 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" +) + +const ( + cborMajorUint byte = iota + cborMajorNegInt + cborMajorBytes + cborMajorText + cborMajorArray + cborMajorMap + cborMajorTag + cborMajorOther +) + +const ( + cborBdFalse byte = 0xf4 + iota + cborBdTrue + cborBdNil + cborBdUndefined + cborBdExt + cborBdFloat16 + cborBdFloat32 + cborBdFloat64 +) + +const ( + cborBdIndefiniteBytes byte = 0x5f + cborBdIndefiniteString = 0x7f + cborBdIndefiniteArray = 0x9f + cborBdIndefiniteMap = 0xbf + cborBdBreak = 0xff +) + +const ( + CborStreamBytes byte = 0x5f + CborStreamString = 0x7f + CborStreamArray = 0x9f + CborStreamMap = 0xbf + CborStreamBreak = 0xff +) + +const ( + cborBaseUint byte = 0x00 + cborBaseNegInt = 0x20 + cborBaseBytes = 0x40 + cborBaseString = 0x60 + cborBaseArray = 0x80 + cborBaseMap = 0xa0 + cborBaseTag = 0xc0 + cborBaseSimple = 0xe0 +) + +// ------------------- + +type cborEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + w encWriter + h *CborHandle + x [8]byte +} + +func (e *cborEncDriver) EncodeNil() { + e.w.writen1(cborBdNil) +} + +func (e *cborEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(cborBdTrue) + } else { + e.w.writen1(cborBdFalse) + } +} + +func (e *cborEncDriver) EncodeFloat32(f float32) { + e.w.writen1(cborBdFloat32) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *cborEncDriver) EncodeFloat64(f float64) { + e.w.writen1(cborBdFloat64) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *cborEncDriver) encUint(v uint64, bd byte) { + if v <= 0x17 { + e.w.writen1(byte(v) + bd) + } else if v <= math.MaxUint8 { + e.w.writen2(bd+0x18, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 0x19) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 0x1a) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 0x1b) + bigenHelper{e.x[:8], e.w}.writeUint64(v) + } +} + +func (e *cborEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-1-v), cborBaseNegInt) + } else { + e.encUint(uint64(v), cborBaseUint) + } +} + +func (e *cborEncDriver) EncodeUint(v uint64) { + e.encUint(v, 
cborBaseUint) +} + +func (e *cborEncDriver) encLen(bd byte, length int) { + e.encUint(uint64(length), bd) +} + +func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + e.encUint(uint64(xtag), cborBaseTag) + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + e.encUint(uint64(re.Tag), cborBaseTag) + if false && re.Data != nil { + en.encode(re.Data) + } else if re.Value != nil { + en.encode(re.Value) + } else { + e.EncodeNil() + } +} + +func (e *cborEncDriver) WriteArrayStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteArray) + } else { + e.encLen(cborBaseArray, length) + } +} + +func (e *cborEncDriver) WriteMapStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteMap) + } else { + e.encLen(cborBaseMap, length) + } +} + +func (e *cborEncDriver) WriteMapEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) WriteArrayEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) EncodeString(c charEncoding, v string) { + e.encLen(cborBaseString, len(v)) + e.w.writestr(v) +} + +func (e *cborEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if c == c_RAW { + e.encLen(cborBaseBytes, len(v)) + } else { + e.encLen(cborBaseString, len(v)) + } + e.w.writeb(v) +} + +// ---------------------- + +type cborDecDriver struct { + d *Decoder + h *CborHandle + r decReader + b [scratchByteArrayLen]byte + br bool // bytes reader + bdRead bool + bd byte + noBuiltInTypes + // decNoSeparator + decDriverNoopContainerReader +} + +func (d *cborDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *cborDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *cborDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil { + return valueTypeNil + } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { + return valueTypeBytes + } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { + return valueTypeString + } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + return valueTypeArray + } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *cborDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + // treat Nil and Undefined as nil values + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) CheckBreak() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdBreak { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) decUint() (ui uint64) { + v := d.bd & 0x1f + if v <= 0x17 { + ui = uint64(v) + } else { + if v == 0x18 { + ui = uint64(d.r.readn1()) + } else if v == 0x19 { + ui = uint64(bigen.Uint16(d.r.readx(2))) + } else if v == 0x1a { + ui = uint64(bigen.Uint32(d.r.readx(4))) + } else if v == 0x1b { + ui = uint64(bigen.Uint64(d.r.readx(8))) + } else { + d.d.errorf("decUint: Invalid descriptor: %v", d.bd) + return + } + } + return +} + 
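The encUint/decUint pair above implement CBOR's additional-information rule: values up to 0x17 are stored directly in the initial byte, while larger values are flagged with 0x18, 0x19, 0x1a or 0x1b and followed by 1, 2, 4 or 8 big-endian bytes. A self-contained sketch of the same rule for the unsigned major type (illustrative only, not part of the vendored file):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // encodeUint mirrors cborEncDriver.encUint for major type 0 (unsigned int).
    func encodeUint(v uint64) []byte {
        const major = 0x00 // cborBaseUint
        switch {
        case v <= 0x17:
            return []byte{major | byte(v)}
        case v <= 0xff:
            return []byte{major | 0x18, byte(v)}
        case v <= 0xffff:
            b := []byte{major | 0x19, 0, 0}
            binary.BigEndian.PutUint16(b[1:], uint16(v))
            return b
        case v <= 0xffffffff:
            b := []byte{major | 0x1a, 0, 0, 0, 0}
            binary.BigEndian.PutUint32(b[1:], uint32(v))
            return b
        default:
            b := []byte{major | 0x1b, 0, 0, 0, 0, 0, 0, 0, 0}
            binary.BigEndian.PutUint64(b[1:], v)
            return b
        }
    }

    func main() {
        fmt.Printf("%% x\n", encodeUint(10))     // 0a
        fmt.Printf("%% x\n", encodeUint(100))    // 18 64
        fmt.Printf("%% x\n", encodeUint(1000))   // 19 03 e8
        fmt.Printf("%% x\n", encodeUint(100000)) // 1a 00 01 86 a0
    }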
+func (d *cborDecDriver) decCheckInteger() (neg bool) { + if !d.bdRead { + d.readNextBd() + } + major := d.bd >> 5 + if major == cborMajorUint { + } else if major == cborMajorNegInt { + neg = true + } else { + d.d.errorf("invalid major: %v (bd: %v)", major, d.bd) + return + } + return +} + +func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) { + neg := d.decCheckInteger() + ui := d.decUint() + // check if this number can be converted to an int without overflow + var overflow bool + if neg { + if i, overflow = chkOvf.SignedInt(ui + 1); overflow { + d.d.errorf("cbor: overflow converting %v to signed integer", ui+1) + return + } + i = -i + } else { + if i, overflow = chkOvf.SignedInt(ui); overflow { + d.d.errorf("cbor: overflow converting %v to signed integer", ui) + return + } + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("cbor: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + if d.decCheckInteger() { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + ui = d.decUint() + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("cbor: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdFloat16 { + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) + } else if bd == cborBdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if bd == cborBdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else if bd >= cborBaseUint && bd < cborBaseBytes { + f = float64(d.DecodeInt(64)) + } else { + d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd) + return + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("cbor: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *cborDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdTrue { + b = true + } else if bd == cborBdFalse { + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + if d.bd == cborBdIndefiniteMap { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + if d.bd == cborBdIndefiniteArray { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) decLen() int { + return int(d.decUint()) +} + +func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { + d.bdRead = false + for { + if d.CheckBreak() { + break + } + if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { + d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd) + return nil + } + n := d.decLen() + oldLen := len(bs) + newLen := oldLen + n + if newLen > cap(bs) { + bs2 := make([]byte, newLen, 2*cap(bs)+n) + copy(bs2, bs) + bs = bs2 + } else { + bs = bs[:newLen] + } + d.r.readb(bs[oldLen:newLen]) + // bs = append(bs, d.r.readn()...) 
+ d.bdRead = false + } + d.bdRead = false + return bs +} + +func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return nil + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + if bs == nil { + return d.decAppendIndefiniteBytes(nil) + } + return d.decAppendIndefiniteBytes(bs[:0]) + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *cborDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true)) +} + +func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.b[:], true) +} + +func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if !d.bdRead { + d.readNextBd() + } + u := d.decUint() + d.bdRead = false + realxtag = u + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + d.d.decode(&re.Value) + } else if xtag != realxtag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag) + return + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.n + var decodeFurther bool + + switch d.bd { + case cborBdNil: + n.v = valueTypeNil + case cborBdFalse: + n.v = valueTypeBool + n.b = false + case cborBdTrue: + n.v = valueTypeBool + n.b = true + case cborBdFloat16, cborBdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat(true) + case cborBdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat(false) + case cborBdIndefiniteBytes: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case cborBdIndefiniteString: + n.v = valueTypeString + n.s = d.DecodeString() + case cborBdIndefiniteArray: + n.v = valueTypeArray + decodeFurther = true + case cborBdIndefiniteMap: + n.v = valueTypeMap + decodeFurther = true + default: + switch { + case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt(64) + } else { + n.v = valueTypeUint + n.u = d.DecodeUint(64) + } + case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: + n.v = valueTypeInt + n.i = d.DecodeInt(64) + case d.bd >= cborBaseBytes && d.bd < cborBaseString: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case d.bd >= cborBaseString && d.bd < cborBaseArray: + n.v = valueTypeString + n.s = d.DecodeString() + case d.bd >= cborBaseArray && d.bd < cborBaseMap: + n.v = valueTypeArray + decodeFurther = true + case d.bd >= cborBaseMap && d.bd < cborBaseTag: + n.v = valueTypeMap + decodeFurther = true + case d.bd >= cborBaseTag && d.bd < cborBaseSimple: + n.v = valueTypeExt + n.u = d.decUint() + n.l = nil + // d.bdRead = false + // d.d.decode(&re.Value) // handled by decode itself. + // decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + return + } + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +// ------------------------- + +// CborHandle is a Handle for the CBOR encoding format, +// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . +// +// CBOR is comprehensively supported, including support for: +// - indefinite-length arrays/maps/bytes/strings +// - (extension) tags in range 0..0xffff (0 .. 
65535) +// - half, single and double-precision floats +// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) +// - nil, true, false, ... +// - arrays and maps, bytes and text strings +// +// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. +// Users can implement them as needed (using SetExt), including spec-documented ones: +// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. +// +// To encode with indefinite lengths (streaming), users will use +// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants. +// +// For example, to encode "one-byte" as an indefinite length string: +// var buf bytes.Buffer +// e := NewEncoder(&buf, new(CborHandle)) +// buf.WriteByte(CborStreamString) +// e.MustEncode("one-") +// e.MustEncode("byte") +// buf.WriteByte(CborStreamBreak) +// encodedBytes := buf.Bytes() +// var vv interface{} +// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv) +// // Now, vv contains the same string "one-byte" +// +type CborHandle struct { + binaryEncodingType + noElemSeparators + BasicHandle + + // IndefiniteLength=true, means that we encode using indefinitelength + IndefiniteLength bool +} + +func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + +func (h *CborHandle) newEncDriver(e *Encoder) encDriver { + return &cborEncDriver{e: e, w: e.w, h: h} +} + +func (h *CborHandle) newDecDriver(d *Decoder) decDriver { + return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *cborEncDriver) reset() { + e.w = e.e.w +} + +func (d *cborDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +var _ decDriver = (*cborDecDriver)(nil) +var _ encDriver = (*cborEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/decode.go new file mode 100644 index 00000000..246533f3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/decode.go @@ -0,0 +1,2520 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "errors" + "fmt" + "io" + "reflect" + "sync" + "time" +) + +// Some tagging information for error messages. +const ( + msgBadDesc = "Unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +var ( + onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct") + cannotDecodeIntoNilErr = errors.New("cannot decode into nil") + + decUnreadByteNothingToReadErr = errors.New("cannot unread - nothing has been read") + decUnreadByteLastByteNotReadErr = errors.New("cannot unread - last byte has not been read") + decUnreadByteUnknownErr = errors.New("cannot unread - reason unknown") +) + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +type decReader interface { + unreadn1() + + // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR + // just return a view of the []byte being decoded from. + // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. 
+ readx(n int) []byte + readb([]byte) + readn1() uint8 + readn3() (uint8, uint8, uint8) + readn4() (uint8, uint8, uint8, uint8) + // readn1eof() (v uint8, eof bool) + numread() int // number of bytes read + track() + stopTrack() []byte + + // skip will skip any byte that matches, and return the first non-matching byte + skip(accept *bitset256) (token byte) + // readTo will read any byte that matches, stopping once no-longer matching. + readTo(in []byte, accept *bitset256) (out []byte) + // readUntil will read, only stopping once it matches the 'stop' byte. + readUntil(in []byte, stop byte) (out []byte) +} + +// type decReaderByteScanner interface { +// io.Reader +// io.ByteScanner +// } + +type decDriver interface { + // this will check if the next token is a break. + CheckBreak() bool + // Note: TryDecodeAsNil should be careful not to share any temporary []byte with + // the rest of the decDriver. This is because sometimes, we optimize by holding onto + // a transient []byte, and ensuring the only other call we make to the decDriver + // during that time is maybe a TryDecodeAsNil() call. + TryDecodeAsNil() bool + // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. + ContainerType() (vt valueType) + // IsBuiltinType(rt uintptr) bool + DecodeBuiltin(rt uintptr, v interface{}) + + // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. + // For maps and arrays, it will not do the decoding in-band, but will signal + // the decoder, so that is done later, by setting the decNaked.valueType field. + // + // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + // for extensions, DecodeNaked must read the tag and the []byte if it exists. + // if the []byte is not read, then kInterfaceNaked will treat it as a Handle + // that stores the subsequent value in-band, and complete reading the RawExt. + // + // extensions should also use readx to decode them, for efficiency. + // kInterface will extract the detached byte slice if it has to pass it outside its realm. + DecodeNaked() + DecodeInt(bitsize uint8) (i int64) + DecodeUint(bitsize uint8) (ui uint64) + DecodeFloat(chkOverflow32 bool) (f float64) + DecodeBool() (b bool) + // DecodeString can also decode symbols. + // It looks redundant as DecodeBytes is available. + // However, some codecs (e.g. binc) support symbols and can + // return a pre-stored string value, meaning that it can bypass + // the cost of []byte->string conversion. + DecodeString() (s string) + DecodeStringAsBytes() (v []byte) + + // DecodeBytes may be called directly, without going through reflection. + // Consequently, it must be designed to handle possible nil. + DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) + // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) + + // decodeExt will decode into a *RawExt or into an extension. 
+ DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) + // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + ReadArrayStart() int + ReadArrayElem() + ReadArrayEnd() + ReadMapStart() int + ReadMapElemKey() + ReadMapElemValue() + ReadMapEnd() + + reset() + uncacheRead() +} + +// type decNoSeparator struct {} +// func (_ decNoSeparator) ReadEnd() {} + +type decDriverNoopContainerReader struct{} + +func (_ decDriverNoopContainerReader) ReadArrayStart() (v int) { return } +func (_ decDriverNoopContainerReader) ReadArrayElem() {} +func (_ decDriverNoopContainerReader) ReadArrayEnd() {} +func (_ decDriverNoopContainerReader) ReadMapStart() (v int) { return } +func (_ decDriverNoopContainerReader) ReadMapElemKey() {} +func (_ decDriverNoopContainerReader) ReadMapElemValue() {} +func (_ decDriverNoopContainerReader) ReadMapEnd() {} +func (_ decDriverNoopContainerReader) CheckBreak() (v bool) { return } + +// func (_ decNoSeparator) uncacheRead() {} + +type DecodeOptions struct { + // MapType specifies type to use during schema-less decoding of a map in the stream. + // If nil, we use map[interface{}]interface{} + MapType reflect.Type + + // SliceType specifies type to use during schema-less decoding of an array in the stream. + // If nil, we use []interface{} + SliceType reflect.Type + + // MaxInitLen defines the maxinum initial length that we "make" a collection (string, slice, map, chan). + // If 0 or negative, we default to a sensible value based on the size of an element in the collection. + // + // For example, when decoding, a stream may say that it has 2^64 elements. + // We should not auto-matically provision a slice of that length, to prevent Out-Of-Memory crash. + // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. + MaxInitLen int + + // If ErrorIfNoField, return an error when decoding a map + // from a codec stream into a struct, and no matching struct field is found. + ErrorIfNoField bool + + // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. + // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, + // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). + ErrorIfNoArrayExpand bool + + // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). + SignedInteger bool + + // MapValueReset controls how we decode into a map value. + // + // By default, we MAY retrieve the mapping for a key, and then decode into that. + // However, especially with big maps, that retrieval may be expensive and unnecessary + // if the stream already contains all that is necessary to recreate the value. + // + // If true, we will never retrieve the previous mapping, + // but rather decode into a new value and set that in the map. + // + // If false, we will retrieve the previous mapping if necessary e.g. + // the previous mapping is a pointer, or is a struct or array with pre-set state, + // or is an interface. + MapValueReset bool + + // SliceElementReset: on decoding a slice, reset the element to a zero value first. + // + // concern: if the slice already contained some garbage, we will decode into that garbage. + SliceElementReset bool + + // InterfaceReset controls how we decode into an interface. + // + // By default, when we see a field that is an interface{...}, + // or a map with interface{...} value, we will attempt decoding into the + // "contained" value. 
+ // + // However, this prevents us from reading a string into an interface{} + // that formerly contained a number. + // + // If true, we will decode into a new "blank" value, and set that in the interface. + // If false, we will decode into whatever is contained in the interface. + InterfaceReset bool + + // InternString controls interning of strings during decoding. + // + // Some handles, e.g. json, typically will read map keys as strings. + // If the set of keys are finite, it may help reduce allocation to + // look them up from a map (than to allocate them afresh). + // + // Note: Handles will be smart when using the intern functionality. + // Every string should not be interned. + // An excellent use-case for interning is struct field names, + // or map keys where key type is string. + InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. + PreferArrayOverSlice bool + + // DeleteOnNilMapValue controls how to decode a nil value in the stream. + // + // If true, we will delete the mapping of the key. + // Else, just set the mapping to the zero value of the type. + DeleteOnNilMapValue bool + + // ReaderBufferSize is the size of the buffer used when reading. + // + // if > 0, we use a smart buffer internally for performance purposes. + ReaderBufferSize int +} + +// ------------------------------------ + +type bufioDecReader struct { + buf []byte + r io.Reader + + c int // cursor + n int // num read + err error + + trb bool + tr []byte + + b [8]byte +} + +func (z *bufioDecReader) reset(r io.Reader) { + z.r, z.c, z.n, z.err, z.trb = r, 0, 0, nil, false + if z.tr != nil { + z.tr = z.tr[:0] + } +} + +func (z *bufioDecReader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + p0 := p + n = copy(p, z.buf[z.c:]) + z.c += n + if z.c == len(z.buf) { + z.c = 0 + } + z.n += n + if len(p) == n { + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } + return + } + p = p[n:] + var n2 int + // if we are here, then z.buf is all read + if len(p) > len(z.buf) { + n2, err = decReadFull(z.r, p) + n += n2 + z.n += n2 + z.err = err + // don't return EOF if some bytes were read. keep for next time. + if n > 0 && err == io.EOF { + err = nil + } + // always keep last byte in z.buf + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } + return + } + // z.c is now 0, and len(p) <= len(z.buf) + for len(p) > 0 && z.err == nil { + // println("len(p) loop starting ... ") + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 > 0 { + if err == io.EOF { + err = nil + } + z.buf = z.buf[:n2] + n2 = copy(p, z.buf) + z.c = n2 + n += n2 + z.n += n2 + p = p[n2:] + } + z.err = err + // println("... len(p) loop done") + } + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) 
+ } + return +} + +func (z *bufioDecReader) ReadByte() (b byte, err error) { + z.b[0] = 0 + _, err = z.Read(z.b[:1]) + b = z.b[0] + return +} + +func (z *bufioDecReader) UnreadByte() (err error) { + if z.err != nil { + return z.err + } + if z.c > 0 { + z.c-- + z.n-- + if z.trb { + z.tr = z.tr[:len(z.tr)-1] + } + return + } + return decUnreadByteNothingToReadErr +} + +func (z *bufioDecReader) numread() int { + return z.n +} + +func (z *bufioDecReader) readx(n int) (bs []byte) { + if n <= 0 || z.err != nil { + return + } + if z.c+n <= len(z.buf) { + bs = z.buf[z.c : z.c+n] + z.n += n + z.c += n + if z.trb { + z.tr = append(z.tr, bs...) + } + return + } + bs = make([]byte, n) + _, err := z.Read(bs) + if err != nil { + panic(err) + } + return +} + +func (z *bufioDecReader) readb(bs []byte) { + _, err := z.Read(bs) + if err != nil { + panic(err) + } +} + +// func (z *bufioDecReader) readn1eof() (b uint8, eof bool) { +// b, err := z.ReadByte() +// if err != nil { +// if err == io.EOF { +// eof = true +// } else { +// panic(err) +// } +// } +// return +// } + +func (z *bufioDecReader) readn1() (b uint8) { + b, err := z.ReadByte() + if err != nil { + panic(err) + } + return +} + +func (z *bufioDecReader) readn3() (b1, b2, b3 uint8) { + z.readb(z.b[:3]) + return z.b[0], z.b[1], z.b[2] +} + +func (z *bufioDecReader) readn4() (b1, b2, b3, b4 uint8) { + z.readb(z.b[:4]) + return z.b[0], z.b[1], z.b[2], z.b[3] +} + +func (z *bufioDecReader) search(in []byte, accept *bitset256, stop, flag uint8) (token byte, out []byte) { + // flag: 1 (skip), 2 (readTo), 4 (readUntil) + if flag == 4 { + for i := z.c; i < len(z.buf); i++ { + if z.buf[i] == stop { + token = z.buf[i] + z.n = z.n + (i - z.c) - 1 + i++ + out = z.buf[z.c:i] + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } else { + for i := z.c; i < len(z.buf); i++ { + if !accept.isset(z.buf[i]) { + token = z.buf[i] + z.n = z.n + (i - z.c) - 1 + if flag == 1 { + i++ + } else { + out = z.buf[z.c:i] + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } + z.n += len(z.buf) - z.c + if flag != 1 { + out = append(in, z.buf[z.c:]...) + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + if z.err != nil { + return + } + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, z.err = z.r.Read(z.buf) + if n2 > 0 && z.err != nil { + z.err = nil + } + z.buf = z.buf[:n2] + if flag == 4 { + for i := 0; i < n2; i++ { + if z.buf[i] == stop { + token = z.buf[i] + z.n += i - 1 + i++ + out = append(out, z.buf[z.c:i]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } else { + for i := 0; i < n2; i++ { + if !accept.isset(z.buf[i]) { + token = z.buf[i] + z.n += i - 1 + if flag == 1 { + i++ + } + if flag != 1 { + out = append(out, z.buf[z.c:i]...) + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } + if flag != 1 { + out = append(out, z.buf[:n2]...) + } + z.n += n2 + if z.err != nil { + return + } + if z.trb { + z.tr = append(z.tr, z.buf[:n2]...) 
+ } + } +} + +func (z *bufioDecReader) skip(accept *bitset256) (token byte) { + token, _ = z.search(nil, accept, 0, 1) + return +} + +func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + _, out = z.search(in, accept, 0, 2) + return +} + +func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) { + _, out = z.search(in, nil, stop, 4) + return +} + +func (z *bufioDecReader) unreadn1() { + err := z.UnreadByte() + if err != nil { + panic(err) + } +} + +func (z *bufioDecReader) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *bufioDecReader) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ioDecReader is a decReader that reads off an io.Reader. +// +// It also has a fallback implementation of ByteScanner if needed. +type ioDecReader struct { + r io.Reader // the reader passed in + + rr io.Reader + br io.ByteScanner + + l byte // last byte + ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread + b [4]byte // tiny buffer for reading single bytes + trb bool // tracking bytes turned on + + // temp byte array re-used internally for efficiency during read. + // shares buffer with Decoder, so we keep size of struct within 8 words. + x *[scratchByteArrayLen]byte + n int // num read + tr []byte // tracking bytes read +} + +func (z *ioDecReader) reset(r io.Reader) { + z.r = r + z.rr = r + z.l, z.ls, z.n, z.trb = 0, 0, 0, false + if z.tr != nil { + z.tr = z.tr[:0] + } + var ok bool + if z.br, ok = r.(io.ByteScanner); !ok { + z.br = z + z.rr = z + } +} + +func (z *ioDecReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return + } + var firstByte bool + if z.ls == 1 { + z.ls = 2 + p[0] = z.l + if len(p) == 1 { + n = 1 + return + } + firstByte = true + p = p[1:] + } + n, err = z.r.Read(p) + if n > 0 { + if err == io.EOF && n == len(p) { + err = nil // read was successful, so postpone EOF (till next time) + } + z.l = p[n-1] + z.ls = 2 + } + if firstByte { + n++ + } + return +} + +func (z *ioDecReader) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:1]) + if n == 1 { + c = z.b[0] + if err == io.EOF { + err = nil // read was successful, so postpone EOF (till next time) + } + } + return +} + +func (z *ioDecReader) UnreadByte() (err error) { + switch z.ls { + case 2: + z.ls = 1 + case 0: + err = decUnreadByteNothingToReadErr + case 1: + err = decUnreadByteLastByteNotReadErr + default: + err = decUnreadByteUnknownErr + } + return +} + +func (z *ioDecReader) numread() int { + return z.n +} + +func (z *ioDecReader) readx(n int) (bs []byte) { + if n <= 0 { + return + } + if n < len(z.x) { + bs = z.x[:n] + } else { + bs = make([]byte, n) + } + if _, err := decReadFull(z.rr, bs); err != nil { + panic(err) + } + z.n += len(bs) + if z.trb { + z.tr = append(z.tr, bs...) + } + return +} + +func (z *ioDecReader) readb(bs []byte) { + // if len(bs) == 0 { + // return + // } + if _, err := decReadFull(z.rr, bs); err != nil { + panic(err) + } + z.n += len(bs) + if z.trb { + z.tr = append(z.tr, bs...) 
+ } +} + +func (z *ioDecReader) readn1eof() (b uint8, eof bool) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else if err == io.EOF { + eof = true + } else { + panic(err) + } + return +} + +func (z *ioDecReader) readn1() (b uint8) { + var err error + if b, err = z.br.ReadByte(); err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return + } + panic(err) +} + +func (z *ioDecReader) readn3() (b1, b2, b3 uint8) { + z.readb(z.b[:3]) + return z.b[0], z.b[1], z.b[2] +} + +func (z *ioDecReader) readn4() (b1, b2, b3, b4 uint8) { + z.readb(z.b[:4]) + return z.b[0], z.b[1], z.b[2], z.b[3] +} + +func (z *ioDecReader) skip(accept *bitset256) (token byte) { + for { + var eof bool + token, eof = z.readn1eof() + if eof { + return + } + if accept.isset(token) { + continue + } + return + } +} + +func (z *ioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + out = in + for { + token, eof := z.readn1eof() + if eof { + return + } + if accept.isset(token) { + out = append(out, token) + } else { + z.unreadn1() + return + } + } +} + +func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) { + out = in + for { + token, eof := z.readn1eof() + if eof { + panic(io.EOF) + } + out = append(out, token) + if token == stop { + return + } + } +} + +func (z *ioDecReader) unreadn1() { + err := z.br.UnreadByte() + if err != nil { + panic(err) + } + z.n-- + if z.trb { + if l := len(z.tr) - 1; l >= 0 { + z.tr = z.tr[:l] + } + } +} + +func (z *ioDecReader) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *ioDecReader) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ------------------------------------ + +var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read") + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c int // cursor + a int // available + t int // track start +} + +func (z *bytesDecReader) reset(in []byte) { + z.b = in + z.a = len(in) + z.c = 0 + z.t = 0 +} + +func (z *bytesDecReader) numread() int { + return z.c +} + +func (z *bytesDecReader) unreadn1() { + if z.c == 0 || len(z.b) == 0 { + panic(bytesDecReaderCannotUnreadErr) + } + z.c-- + z.a++ + return +} + +func (z *bytesDecReader) readx(n int) (bs []byte) { + // slicing from a non-constant start position is more expensive, + // as more computation is required to decipher the pointer start position. + // However, we do it only once, and it's better than reslicing both z.b and return value. 
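+	// Note (sketch): the slice returned below aliases the input buffer z.b (zero copy), e.g.
+	//   bs := z.readx(4) // bs shares memory with the []byte given to reset/NewDecoderBytes
+	// so callers needing an independent copy should detach it, as detachZeroCopyBytes does.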
+ + if n <= 0 { + } else if z.a == 0 { + panic(io.EOF) + } else if n > z.a { + panic(io.ErrUnexpectedEOF) + } else { + c0 := z.c + z.c = c0 + n + z.a = z.a - n + bs = z.b[c0:z.c] + } + return +} + +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readx(len(bs))) +} + +func (z *bytesDecReader) readn1() (v uint8) { + if z.a == 0 { + panic(io.EOF) + } + v = z.b[z.c] + z.c++ + z.a-- + return +} + +func (z *bytesDecReader) readn3() (b1, b2, b3 uint8) { + if 3 > z.a { + panic(io.ErrUnexpectedEOF) + } + b3 = z.b[z.c+2] + b2 = z.b[z.c+1] + b1 = z.b[z.c] + z.c += 3 + z.a -= 3 + return +} + +func (z *bytesDecReader) readn4() (b1, b2, b3, b4 uint8) { + if 4 > z.a { + panic(io.ErrUnexpectedEOF) + } + b4 = z.b[z.c+3] + b3 = z.b[z.c+2] + b2 = z.b[z.c+1] + b1 = z.b[z.c] + z.c += 4 + z.a -= 4 + return +} + +// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { +// if z.a == 0 { +// eof = true +// return +// } +// v = z.b[z.c] +// z.c++ +// z.a-- +// return +// } + +func (z *bytesDecReader) skip(accept *bitset256) (token byte) { + if z.a == 0 { + return + } + blen := len(z.b) + for i := z.c; i < blen; i++ { + if !accept.isset(z.b[i]) { + token = z.b[i] + i++ + z.a -= (i - z.c) + z.c = i + return + } + } + z.a, z.c = 0, blen + return +} + +func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) { + if z.a == 0 { + return + } + blen := len(z.b) + for i := z.c; i < blen; i++ { + if !accept.isset(z.b[i]) { + out = z.b[z.c:i] + z.a -= (i - z.c) + z.c = i + return + } + } + out = z.b[z.c:] + z.a, z.c = 0, blen + return +} + +func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) { + if z.a == 0 { + panic(io.EOF) + } + blen := len(z.b) + for i := z.c; i < blen; i++ { + if z.b[i] == stop { + i++ + out = z.b[z.c:i] + z.a -= (i - z.c) + z.c = i + return + } + } + z.a, z.c = 0, blen + panic(io.EOF) +} + +func (z *bytesDecReader) track() { + z.t = z.c +} + +func (z *bytesDecReader) stopTrack() (bs []byte) { + return z.b[z.t:z.c] +} + +// ---------------------------------------- + +func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv)) +} + +func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), 0, nil) +} + +func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn) +} + +func (d *Decoder) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) { + if indir == -1 { + v = rv2i(rv.Addr()) + } else if indir == 0 { + v = rv2i(rv) + } else { + for j := int8(0); j < indir; j++ { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + v = rv2i(rv) + } + return +} + +func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) { + d.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(d) +} + +func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) { + bm := d.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler) + xbs := d.d.DecodeBytes(nil, true) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := d.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := d.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler) + // bs := 
d.d.DecodeBytes(d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) { + d.errorf("no decoding function defined for kind %v", rv.Kind()) +} + +// var kIntfCtr uint64 + +func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { + // nil interface: + // use some hieristics to decode it appropriately + // based on the detected next value in the stream. + n := d.naked() + d.d.DecodeNaked() + if n.v == valueTypeNil { + return + } + // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader). + // if num := f.ti.rt.NumMethod(); num > 0 { + if f.ti.numMeth > 0 { + d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + return + } + // var useRvn bool + switch n.v { + case valueTypeMap: + if d.mtid == 0 || d.mtid == mapIntfIntfTypId { + if n.lm < arrayCacheLen { + n.ma[n.lm] = nil + rvn = n.rr[decNakedMapIntfIntfIdx*arrayCacheLen+n.lm] + n.lm++ + d.decode(&n.ma[n.lm-1]) + n.lm-- + } else { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } + } else if d.mtid == mapStrIntfTypId { // for json performance + if n.ln < arrayCacheLen { + n.na[n.ln] = nil + rvn = n.rr[decNakedMapStrIntfIdx*arrayCacheLen+n.ln] + n.ln++ + d.decode(&n.na[n.ln-1]) + n.ln-- + } else { + var v2 map[string]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } + } else { + rvn = reflect.New(d.h.MapType) + if useLookupRecognizedTypes && d.mtr { // isRecognizedRtid(d.mtid) { + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvn.Elem() + d.decodeValue(rvn, nil, false, true) + } + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + if n.ls < arrayCacheLen { + n.sa[n.ls] = nil + rvn = n.rr[decNakedSliceIntfIdx*arrayCacheLen+n.ls] + n.ls++ + d.decode(&n.sa[n.ls-1]) + n.ls-- + } else { + var v2 []interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } + if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { + rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + rvn = rvn2 + } + } else { + rvn = reflect.New(d.h.SliceType) + if useLookupRecognizedTypes && d.str { // isRecognizedRtid(d.stid) { + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvn.Elem() + d.decodeValue(rvn, nil, false, true) + } + } + case valueTypeExt: + var v interface{} + tag, bytes := n.u, n.l // calling decode below might taint the values + if bytes == nil { + if n.li < arrayCacheLen { + n.ia[n.li] = nil + n.li++ + d.decode(&n.ia[n.li-1]) + // v = *(&n.ia[l]) + n.li-- + v = n.ia[n.li] + n.ia[n.li] = nil + } else { + d.decode(&v) + } + } + bfn := d.h.getExtForTag(tag) + if bfn == nil { + var re RawExt + re.Tag = tag + re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) + re.Value = v + rvn = reflect.ValueOf(&re).Elem() + } else { + rvnA := reflect.New(bfn.rt) + if bytes != nil { + bfn.ext.ReadExt(rv2i(rvnA), bytes) + } else { + bfn.ext.UpdateExt(rv2i(rvnA), v) + } + rvn = rvnA.Elem() + } + case valueTypeNil: + // no-op + case valueTypeInt: + rvn = n.rr[decNakedIntIdx] // d.np.get(&n.i) + case valueTypeUint: + rvn = n.rr[decNakedUintIdx] // d.np.get(&n.u) + case valueTypeFloat: + rvn = n.rr[decNakedFloatIdx] // d.np.get(&n.f) + case valueTypeBool: + rvn = n.rr[decNakedBoolIdx] // d.np.get(&n.b) + case 
valueTypeString, valueTypeSymbol: + rvn = n.rr[decNakedStringIdx] // d.np.get(&n.s) + case valueTypeBytes: + rvn = n.rr[decNakedBytesIdx] // d.np.get(&n.l) + case valueTypeTimestamp: + rvn = n.rr[decNakedTimeIdx] // d.np.get(&n.t) + default: + panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v)) + } + return +} + +func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { + // Note: + // A consequence of how kInterface works, is that + // if an interface already contains something, we try + // to decode into what was there before. + // We do not replace with a generic value (as got from decodeNaked). + + // every interface passed here MUST be settable. + var rvn reflect.Value + if rv.IsNil() { + if rvn = d.kInterfaceNaked(f); rvn.IsValid() { + rv.Set(rvn) + } + return + } + if d.h.InterfaceReset { + if rvn = d.kInterfaceNaked(f); rvn.IsValid() { + rv.Set(rvn) + } else { + // reset to zero value based on current type in there. + rv.Set(reflect.Zero(rv.Elem().Type())) + } + return + } + + // now we have a non-nil interface value, meaning it contains a type + rvn = rv.Elem() + if d.d.TryDecodeAsNil() { + rv.Set(reflect.Zero(rvn.Type())) + return + } + + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we MAY have to create a decodable value out of the underlying value, + // decode into it, and reset the interface itself. + // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type()) + + rvn2, canDecode := isDecodeable(rvn) + if canDecode { + d.decodeValue(rvn2, nil, true, true) + return + } + + rvn2 = reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + d.decodeValue(rvn2, nil, true, true) + rv.Set(rvn2) +} + +func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { + // checking if recognized within kstruct is too expensive. + // only check where you can determine if valid outside the loop + // ie on homogenous collections: slices, arrays and maps. + // + // if true, we don't create too many decFn's. + // It's a delicate balance. + const checkRecognized bool = false // false: TODO + + fti := f.ti + dd := d.d + elemsep := d.hh.hasElemSeparators() + sfn := structFieldNode{v: rv, update: true} + ctyp := dd.ContainerType() + if ctyp == valueTypeMap { + containerLen := dd.ReadMapStart() + if containerLen == 0 { + dd.ReadMapEnd() + return + } + tisfi := fti.sfi + hasLen := containerLen >= 0 + + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + // rvkencname := dd.DecodeString() + if elemsep { + dd.ReadMapElemKey() + } + rvkencnameB := dd.DecodeStringAsBytes() + rvkencname := stringView(rvkencnameB) + // rvksi := ti.getForEncName(rvkencname) + if elemsep { + dd.ReadMapElemValue() + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, checkRecognized, true) + } + } else { + d.structFieldNotFound(-1, rvkencname) + } + // keepAlive4StringView(rvkencnameB) // maintain ref 4 stringView // not needed, as reference is outside loop + } + dd.ReadMapEnd() + } else if ctyp == valueTypeArray { + containerLen := dd.ReadArrayStart() + if containerLen == 0 { + dd.ReadArrayEnd() + return + } + // Not much gain from doing it two ways for array. + // Arrays are not used as much for structs. 
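+		// Illustrative sketch: fields are filled positionally from fti.sfip below, so a
+		// stream array [1, "a"] decoded into struct{ A int; B string } sets A=1 and B="a";
+		// any extra stream elements beyond len(fti.sfip) are read and discarded.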
+ hasLen := containerLen >= 0 + for j, si := range fti.sfip { + if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) { + break + } + if elemsep { + dd.ReadArrayElem() + } + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, checkRecognized, true) + } + } + if containerLen > len(fti.sfip) { + // read remaining values and throw away + for j := len(fti.sfip); j < containerLen; j++ { + if elemsep { + dd.ReadArrayElem() + } + d.structFieldNotFound(j, "") + } + } + dd.ReadArrayEnd() + } else { + d.error(onlyMapOrArrayCanDecodeIntoStructErr) + return + } +} + +func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { + // A slice can be set from a map or array in stream. + // This way, the order can be kept (as order is lost with map). + ti := f.ti + dd := d.d + rtelem0 := ti.rt.Elem() + ctyp := dd.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + // you can only decode bytes or string in the stream into a slice or array of bytes + if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { + d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) + } + if f.seq == seqTypeChan { + bs2 := dd.DecodeBytes(nil, true) + ch := rv2i(rv).(chan<- byte) + for _, b := range bs2 { + ch <- b + } + } else { + rvbs := rv.Bytes() + bs2 := dd.DecodeBytes(rvbs, false) + if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { + if rv.CanSet() { + rv.SetBytes(bs2) + } else { + copy(rvbs, bs2) + } + } + } + return + } + + // array := f.seq == seqTypeChan + + slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) + + // an array can never return a nil slice. so no need to check f.array here. + if containerLenS == 0 { + if rv.CanSet() { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } + } + } + slh.End() + return + } + + rtelem0Size := int(rtelem0.Size()) + rtElem0Kind := rtelem0.Kind() + rtElem0Id := rt2id(rtelem0) + rtelem0Mut := !isImmutableKind(rtElem0Kind) + rtelem := rtelem0 + rtelemkind := rtelem.Kind() + for rtelemkind == reflect.Ptr { + rtelem = rtelem.Elem() + rtelemkind = rtelem.Kind() + } + + var fn *codecFn + + var rv0, rv9 reflect.Value + rv0 = rv + rvChanged := false + + rvlen := rv.Len() + rvcap := rv.Cap() + hasLen := containerLenS > 0 + if hasLen && f.seq == seqTypeSlice { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size())) + if rvlen <= rvcap { + if rv.CanSet() { + rv.SetLen(rvlen) + } else { + rv = rv.Slice(0, rvlen) + rvChanged = true + } + } else { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvcap = rvlen + rvChanged = true + } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) + } + } else if containerLenS != rvlen { + rvlen = containerLenS + if rv.CanSet() { + rv.SetLen(rvlen) + } else { + rv = rv.Slice(0, rvlen) + rvChanged = true + } + } + } + + var recognizedRtid, recognizedRtidPtr bool + if useLookupRecognizedTypes { + recognizedRtid = isRecognizedRtid(rtElem0Id) + recognizedRtidPtr = isRecognizedRtidPtr(rtElem0Id) + } + + // consider creating new element once, and just decoding into it. 
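+	// Sketch of the loop below: the slice is pre-sized via decInferLen (capped by
+	// d.h.MaxInitLen) and grown further through decExpandSliceRV as elements arrive;
+	// for channels, rv9 is re-allocated per element only when the element kind is mutable.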
+ var rtelem0Zero reflect.Value + var rtelem0ZeroValid bool + var decodeAsNil bool + var j int + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() { + if hasLen { + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size) + } else { + rvlen = 8 + } + if f.seq == seqTypeSlice { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } else if f.seq == seqTypeChan { + rv.Set(reflect.MakeChan(ti.rt, rvlen)) + } + } + slh.ElemContainerState(j) + decodeAsNil = dd.TryDecodeAsNil() + if f.seq == seqTypeChan { + if decodeAsNil { + rv.Send(reflect.Zero(rtelem0)) + continue + } + if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) { + rv9 = reflect.New(rtelem0).Elem() + } + if useLookupRecognizedTypes && (recognizedRtid || recognizedRtidPtr) { + d.decode(rv2i(rv9.Addr())) + } else { + if fn == nil { + fn = d.cf.get(rtelem, true, true) + } + d.decodeValue(rv9, fn, false, true) + } + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs + var rvcap2 int + rv9, rvcap2, rvChanged = decExpandSliceRV(rv, ti.rt, rtelem0Size, 1, rvlen, rvcap) + rvlen++ + if rvChanged { + rv = rv9 + rvcap = rvcap2 + } + } + } + if decodeIntoBlank { + if !decodeAsNil { + d.swallow() + } + } else { + rv9 = rv.Index(j) + if d.h.SliceElementReset || decodeAsNil { + if !rtelem0ZeroValid { + rtelem0ZeroValid = true + rtelem0Zero = reflect.Zero(rtelem0) + } + rv9.Set(rtelem0Zero) + } + if decodeAsNil { + continue + } + + if useLookupRecognizedTypes && recognizedRtid { + d.decode(rv2i(rv9.Addr())) + } else if useLookupRecognizedTypes && recognizedRtidPtr { // && !rv9.IsNil() { + if rv9.IsNil() { + rv9.Set(reflect.New(rtelem)) + } + d.decode(rv2i(rv9)) + } else { + if fn == nil { + fn = d.cf.get(rtelem, true, true) + } + d.decodeValue(rv9, fn, false, true) + } + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + if rv.CanSet() { + rv.SetLen(j) + } else { + rv = rv.Slice(0, j) + rvChanged = true + } + rvlen = j + } else if j == 0 && rv.IsNil() { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true + } + } + slh.End() + + if rvChanged { + rv0.Set(rv) + } +} + +// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { +// // d.decodeValueFn(rv.Slice(0, rv.Len())) +// f.kSlice(rv.Slice(0, rv.Len())) +// } + +func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { + dd := d.d + containerLen := dd.ReadMapStart() + elemsep := d.hh.hasElemSeparators() + ti := f.ti + if rv.IsNil() { + rv.Set(makeMapReflect(ti.rt, containerLen)) + } + + if containerLen == 0 { + dd.ReadMapEnd() + return + } + + ktype, vtype := ti.rt.Key(), ti.rt.Elem() + ktypeId := rt2id(ktype) + vtypeId := rt2id(vtype) + vtypeKind := vtype.Kind() + var recognizedKtyp, recognizedVtyp, recognizedPtrKtyp, recognizedPtrVtyp bool + if useLookupRecognizedTypes { + recognizedKtyp = isRecognizedRtid(ktypeId) + recognizedVtyp = isRecognizedRtid(vtypeId) + recognizedPtrKtyp = isRecognizedRtidPtr(ktypeId) + recognizedPtrVtyp = isRecognizedRtidPtr(vtypeId) + } + + var keyFn, valFn *codecFn + var ktypeLo, vtypeLo reflect.Type + for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + + for vtypeLo = vtype; 
vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + + var mapGet, mapSet bool + rvvImmut := isImmutableKind(vtypeKind) + if !d.h.MapValueReset { + // if pointer, mapGet = true + // if interface, mapGet = true if !DecodeNakedAlways (else false) + // if builtin, mapGet = false + // else mapGet = true + if vtypeKind == reflect.Ptr { + mapGet = true + } else if vtypeKind == reflect.Interface { + if !d.h.InterfaceReset { + mapGet = true + } + } else if !rvvImmut { + mapGet = true + } + } + + var rvk, rvkp, rvv, rvz reflect.Value + rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk. + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen > 0 + var kstrbs []byte + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadMapElemKey() + } + // if a nil key, just ignore the mapped value and continue + if dd.TryDecodeAsNil() { + if elemsep { + dd.ReadMapElemValue() + } + d.swallow() + continue + } + if rvkMut || !rvkp.IsValid() { + rvkp = reflect.New(ktype) + rvk = rvkp.Elem() + } + if ktypeIsString { + kstrbs = dd.DecodeStringAsBytes() + rvk.SetString(stringView(kstrbs)) + // NOTE: if doing an insert, you MUST use a real string (not stringview) + } else if useLookupRecognizedTypes && recognizedKtyp { + d.decode(rv2i(rvkp)) + // rvk = rvkp.Elem() //TODO: remove, unnecessary + } else if useLookupRecognizedTypes && recognizedPtrKtyp { + if rvk.IsNil() { + rvk = reflect.New(ktypeLo) + } + d.decode(rv2i(rvk)) + } else { + if keyFn == nil { + keyFn = d.cf.get(ktypeLo, true, true) + } + d.decodeValue(rvk, keyFn, false, true) + } + // special case if a byte array. + if ktypeIsIntf { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(d.string(rvk.Bytes())) + } + } + + if elemsep { + dd.ReadMapElemValue() + } + + // Brittle, but OK per TryDecodeAsNil() contract. + // i.e. TryDecodeAsNil never shares slices with other decDriver procedures + if dd.TryDecodeAsNil() { + if ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if d.h.DeleteOnNilMapValue { + rv.SetMapIndex(rvk, reflect.Value{}) + } else { + rv.SetMapIndex(rvk, reflect.Zero(vtype)) + } + continue + } + + mapSet = true // set to false if u do a get, and its a non-nil pointer + if mapGet { + // mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable. + rvv = rv.MapIndex(rvk) + if !rvv.IsValid() { + rvv = reflect.New(vtype).Elem() + } else if vtypeKind == reflect.Ptr { + if rvv.IsNil() { + rvv = reflect.New(vtype).Elem() + } else { + mapSet = false + } + } else if vtypeKind == reflect.Interface { + // not addressable, and thus not settable. + // e MUST create a settable/addressable variant + rvv2 := reflect.New(rvv.Type()).Elem() + if !rvv.IsNil() { + rvv2.Set(rvv) + } + rvv = rvv2 + } + // else it is ~mutable, and we can just decode into it directly + } else if rvvImmut { + if !rvz.IsValid() { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } else { + rvv = reflect.New(vtype).Elem() + } + + // We MUST be done with the stringview of the key, before decoding the value + // so that we don't bastardize the reused byte array. 
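+		// Sketch of the subtlety here: rvk may still hold a stringView over the decoder's
+		// scratch buffer; d.string(kstrbs) below materializes (and possibly interns) a real
+		// string before it is used as a map key, so the key never aliases a reused buffer.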
+ if mapSet && ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if useLookupRecognizedTypes && recognizedVtyp && rvv.CanAddr() { + d.decode(rv2i(rvv.Addr())) + } else if useLookupRecognizedTypes && recognizedPtrVtyp { + if rvv.IsNil() { + rvv = reflect.New(vtypeLo) + mapSet = true + } + d.decode(rv2i(rvv)) + } else { + if valFn == nil { + valFn = d.cf.get(vtypeLo, true, true) + } + d.decodeValue(rvv, valFn, false, true) + // d.decodeValueFn(rvv, valFn) + } + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + // if ktypeIsString { + // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop + // } + } + + dd.ReadMapEnd() +} + +// decNaked is used to keep track of the primitives decoded. +// Without it, we would have to decode each primitive and wrap it +// in an interface{}, causing an allocation. +// In this model, the primitives are decoded in a "pseudo-atomic" fashion, +// so we can rest assured that no other decoding happens while these +// primitives are being decoded. +// +// maps and arrays are not handled by this mechanism. +// However, RawExt is, and we accommodate for extensions that decode +// RawExt from DecodeNaked, but need to decode the value subsequently. +// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. +// +// However, decNaked also keeps some arrays of default maps and slices +// used in DecodeNaked. This way, we can get a pointer to it +// without causing a new heap allocation. +// +// kInterfaceNaked will ensure that there is no allocation for the common +// uses. +type decNaked struct { + // r RawExt // used for RawExt, uint, []byte. + u uint64 + i int64 + f float64 + l []byte + s string + t time.Time + + b bool + + inited bool + + v valueType + + li, lm, ln, ls int8 + + // array/stacks for reducing allocation + // keep arrays at the bottom? Chance is that they are not used much. + ia [arrayCacheLen]interface{} + ma [arrayCacheLen]map[interface{}]interface{} + na [arrayCacheLen]map[string]interface{} + sa [arrayCacheLen][]interface{} + // ra [2]RawExt + + rr [5 * arrayCacheLen]reflect.Value +} + +const ( + decNakedUintIdx = iota + decNakedIntIdx + decNakedFloatIdx + decNakedBytesIdx + decNakedStringIdx + decNakedTimeIdx + decNakedBoolIdx +) +const ( + _ = iota // maps to the scalars above + decNakedIntfIdx + decNakedMapIntfIntfIdx + decNakedMapStrIntfIdx + decNakedSliceIntfIdx +) + +func (n *decNaked) init() { + if n.inited { + return + } + // n.ms = n.ma[:0] + // n.is = n.ia[:0] + // n.ns = n.na[:0] + // n.ss = n.sa[:0] + + n.rr[decNakedUintIdx] = reflect.ValueOf(&n.u).Elem() + n.rr[decNakedIntIdx] = reflect.ValueOf(&n.i).Elem() + n.rr[decNakedFloatIdx] = reflect.ValueOf(&n.f).Elem() + n.rr[decNakedBytesIdx] = reflect.ValueOf(&n.l).Elem() + n.rr[decNakedStringIdx] = reflect.ValueOf(&n.s).Elem() + n.rr[decNakedTimeIdx] = reflect.ValueOf(&n.t).Elem() + n.rr[decNakedBoolIdx] = reflect.ValueOf(&n.b).Elem() + + for i := range [arrayCacheLen]struct{}{} { + n.rr[decNakedIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.ia[i])).Elem() + n.rr[decNakedMapIntfIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.ma[i])).Elem() + n.rr[decNakedMapStrIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.na[i])).Elem() + n.rr[decNakedSliceIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.sa[i])).Elem() + } + n.inited = true + // n.rr[] = reflect.ValueOf(&n.) 
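+	// Layout sketch: rr is 5 banks of arrayCacheLen entries; bank 0 holds the scalar
+	// slots (decNakedUintIdx..decNakedBoolIdx), while banks decNakedIntfIdx..decNakedSliceIntfIdx
+	// cache reflect.Values for ia/ma/na/sa, addressed as rr[bank*arrayCacheLen+i].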
+} + +func (n *decNaked) reset() { + if n == nil { + return + } + n.li, n.lm, n.ln, n.ls = 0, 0, 0, 0 +} + +type rtid2rv struct { + rtid uintptr + rv reflect.Value +} + +// A Decoder reads and decodes an object from an input stream in the codec format. +type Decoder struct { + // hopefully, reduce derefencing cost by laying the decReader inside the Decoder. + // Try to put things that go together to fit within a cache line (8 words). + + d decDriver + // NOTE: Decoder shouldn't call it's read methods, + // as the handler MAY need to do some coordination. + r decReader + hh Handle + h *BasicHandle + + mtr, mtrp, str, strp bool // + + be bool // is binary encoding + bytes bool // is bytes reader + js bool // is json handle + + // ---- cpu cache line boundary? + + rb bytesDecReader + ri ioDecReader + bi bufioDecReader + + // cr containerStateRecv + + n *decNaked + nsp *sync.Pool + + // ---- cpu cache line boundary? + + is map[string]string // used for interning strings + + // cache the mapTypeId and sliceTypeId for faster comparisons + mtid uintptr + stid uintptr + + b [scratchByteArrayLen]byte + // _ uintptr // for alignment purposes, so next one starts from a cache line + + err error + // ---- cpu cache line boundary? + + cf codecFner + // _ [64]byte // force alignment??? +} + +// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. +// +// For efficiency, Users are encouraged to pass in a memory buffered reader +// (eg bufio.Reader, bytes.Buffer). +func NewDecoder(r io.Reader, h Handle) *Decoder { + d := newDecoder(h) + d.Reset(r) + return d +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. +func NewDecoderBytes(in []byte, h Handle) *Decoder { + d := newDecoder(h) + d.ResetBytes(in) + return d +} + +var defaultDecNaked decNaked + +func newDecoder(h Handle) *Decoder { + d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} + + // NOTE: do not initialize d.n here. It is lazily initialized in d.naked() + + _, d.js = h.(*JsonHandle) + if d.h.InternString { + d.is = make(map[string]string, 32) + } + d.d = h.newDecDriver(d) + // d.cr, _ = d.d.(containerStateRecv) + return d +} + +// naked must be called before each call to .DecodeNaked, +// as they will use it. +func (d *Decoder) naked() *decNaked { + if d.n == nil { + // consider one of: + // - get from sync.Pool (if GC is frequent, there's no value here) + // - new alloc (safest. only init'ed if it a naked decode will be done) + // - field in Decoder (makes the Decoder struct very big) + // To support using a decoder where a DecodeNaked is not needed, + // we prefer #1 or #2. + // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool + // d.n.init() + var v interface{} + d.nsp, v = pool.decNaked() + d.n = v.(*decNaked) + } + return d.n +} + +func (d *Decoder) resetCommon() { + d.n.reset() + d.d.reset() + d.cf.reset(d.hh) + d.err = nil + // reset all things which were cached from the Handle, + // but could be changed. 
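+	// The map/slice type ids (and their recognized-type flags) are re-derived from the
+	// Handle below on every reset, since h.MapType and h.SliceType may change between
+	// uses of the same Decoder.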
+ d.mtid, d.stid = 0, 0 + d.mtr, d.mtrp, d.str, d.strp = false, false, false, false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + if useLookupRecognizedTypes { + d.mtr = isRecognizedRtid(d.mtid) + d.mtrp = isRecognizedRtidPtr(d.mtid) + } + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + if useLookupRecognizedTypes { + d.str = isRecognizedRtid(d.stid) + d.strp = isRecognizedRtidPtr(d.stid) + } + } +} + +func (d *Decoder) Reset(r io.Reader) { + if d.h.ReaderBufferSize > 0 { + d.bi.buf = make([]byte, 0, d.h.ReaderBufferSize) + d.bi.reset(r) + d.r = &d.bi + } else { + d.ri.x = &d.b + // d.s = d.sa[:0] + d.ri.reset(r) + d.r = &d.ri + } + d.resetCommon() +} + +func (d *Decoder) ResetBytes(in []byte) { + d.bytes = true + d.rb.reset(in) + d.r = &d.rb + d.resetCommon() +} + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. v cannot be a nil pointer. v can also be +// a reflect.Value of a pointer. +// +// Note that a pointer to a nil interface is not a nil pointer. +// If you do not know what type of stream it is, pass in a pointer to a nil interface. +// We will decode and store a value in that nil interface. +// +// Sample usages: +// // Decoding into a non-nil typed value +// var f float32 +// err = codec.NewDecoder(r, handle).Decode(&f) +// +// // Decoding into nil interface +// var v interface{} +// dec := codec.NewDecoder(r, handle) +// err = dec.Decode(&v) +// +// When decoding into a nil interface{}, we will decode into an appropriate value based +// on the contents of the stream: +// - Numbers are decoded as float64, int64 or uint64. +// - Other values are decoded appropriately depending on the type: +// bool, string, []byte, time.Time, etc +// - Extensions are decoded as RawExt (if no ext function registered for the tag) +// Configurations exist on the Handle to override defaults +// (e.g. for MapType, SliceType and how to decode raw bytes). +// +// When decoding into a non-nil interface{} value, the mode of encoding is based on the +// type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error +// - Else decode it based on its reflect.Kind +// +// There are some special rules when decoding into containers (slice/array/map/struct). +// Decode will typically use the stream contents to UPDATE the container. +// - A map can be decoded from a stream map, by updating matching keys. +// - A slice can be decoded from a stream array, +// by updating the first n elements, where n is length of the stream. +// - A slice can be decoded from a stream map, by decoding as if +// it contains a sequence of key-value pairs. +// - A struct can be decoded from a stream map, by updating matching fields. +// - A struct can be decoded from a stream array, +// by updating fields as they occur in the struct (by index). +// +// When decoding a stream map or array with length of 0 into a nil map or slice, +// we reset the destination map or slice to a zero-length value. +// +// However, when decoding a stream nil, we reset the destination container +// to its "zero" value (e.g. nil for slice/map, etc). +// +func (d *Decoder) Decode(v interface{}) (err error) { + defer panicToErrs2(&d.err, &err) + d.MustDecode(v) + return +} + +// MustDecode is like Decode, but panics if unable to Decode. +// This provides insight to the code location that triggered the error. 
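+// A minimal sketch of the difference, assuming a Handle h and an io.Reader r:
+//
+//	d := NewDecoder(r, h)
+//	var v map[string]interface{}
+//	if err := d.Decode(&v); err != nil { /* handle error */ }
+//	d.MustDecode(&v) // panics (with the underlying error) instead of returning it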
+func (d *Decoder) MustDecode(v interface{}) { + // TODO: Top-level: ensure that v is a pointer and not nil. + if d.err != nil { + panic(d.err) + } + if d.d.TryDecodeAsNil() { + d.setZero(v) + } else { + d.decode(v) + } + if d.nsp != nil { + if d.n != nil { + d.nsp.Put(d.n) + d.n = nil + } + d.nsp = nil + } + d.n = nil + // xprintf(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn) +} + +// // this is not a smart swallow, as it allocates objects and does unnecessary work. +// func (d *Decoder) swallowViaHammer() { +// var blank interface{} +// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem()) +// } + +func (d *Decoder) swallow() { + // smarter decode that just swallows the content + dd := d.d + if dd.TryDecodeAsNil() { + return + } + elemsep := d.hh.hasElemSeparators() + switch dd.ContainerType() { + case valueTypeMap: + containerLen := dd.ReadMapStart() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break} + if elemsep { + dd.ReadMapElemKey() + } + d.swallow() + if elemsep { + dd.ReadMapElemValue() + } + d.swallow() + } + dd.ReadMapEnd() + case valueTypeArray: + containerLen := dd.ReadArrayStart() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadArrayElem() + } + d.swallow() + } + dd.ReadArrayEnd() + case valueTypeBytes: + dd.DecodeBytes(d.b[:], true) + case valueTypeString: + dd.DecodeStringAsBytes() + default: + // these are all primitives, which we can get from decodeNaked + // if RawExt using Value, complete the processing. + n := d.naked() + dd.DecodeNaked() + if n.v == valueTypeExt && n.l == nil { + if n.li < arrayCacheLen { + n.ia[n.li] = nil + n.li++ + d.decode(&n.ia[n.li-1]) + n.ia[n.li-1] = nil + n.li-- + } else { + var v2 interface{} + d.decode(&v2) + } + } + } +} + +func (d *Decoder) setZero(iv interface{}) { + if iv == nil || definitelyNil(iv) { + return + } + switch v := iv.(type) { + case *string: + *v = "" + case *bool: + *v = false + case *int: + *v = 0 + case *int8: + *v = 0 + case *int16: + *v = 0 + case *int32: + *v = 0 + case *int64: + *v = 0 + case *uint: + *v = 0 + case *uint8: + *v = 0 + case *uint16: + *v = 0 + case *uint32: + *v = 0 + case *uint64: + *v = 0 + case *float32: + *v = 0 + case *float64: + *v = 0 + case *[]uint8: + *v = nil + case *Raw: + *v = nil + case reflect.Value: + v = d.ensureDecodeable(v) + if v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + default: + if !fastpathDecodeSetZeroTypeSwitch(iv, d) { + v := reflect.ValueOf(iv) + v = d.ensureDecodeable(v) + if v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + } + } +} + +func (d *Decoder) decode(iv interface{}) { + // check nil and interfaces explicitly, + // so that type switches just have a run of constant non-interface types. + if iv == nil { + d.error(cannotDecodeIntoNilErr) + return + } + if v, ok := iv.(Selfer); ok { + v.CodecDecodeSelf(d) + return + } + + switch v := iv.(type) { + // case nil: + // case Selfer: + + case reflect.Value: + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, false, true) // TODO: maybe ask to recognize ... 
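+	// The cases below are a fast path for common concrete pointer types (e.g. *string,
+	// *int64, *[]uint8); anything not listed falls through to fastpathDecodeTypeSwitch
+	// and, failing that, to reflection via decodeValue.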
+ + case *string: + *v = d.d.DecodeString() + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(d.d.DecodeInt(intBitsize)) + case *int8: + *v = int8(d.d.DecodeInt(8)) + case *int16: + *v = int16(d.d.DecodeInt(16)) + case *int32: + *v = int32(d.d.DecodeInt(32)) + case *int64: + *v = d.d.DecodeInt(64) + case *uint: + *v = uint(d.d.DecodeUint(uintBitsize)) + case *uint8: + *v = uint8(d.d.DecodeUint(8)) + case *uint16: + *v = uint16(d.d.DecodeUint(16)) + case *uint32: + *v = uint32(d.d.DecodeUint(32)) + case *uint64: + *v = d.d.DecodeUint(64) + case *float32: + *v = float32(d.d.DecodeFloat(true)) + case *float64: + *v = d.d.DecodeFloat(false) + case *[]uint8: + *v = d.d.DecodeBytes(*v, false) + + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem(), nil, false, true) // TODO: consider recognize here + // d.decodeValueNotNil(reflect.ValueOf(iv).Elem()) + + default: + if !fastpathDecodeTypeSwitch(iv, d) { + v := reflect.ValueOf(iv) + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, false, false) + // d.decodeValueFallback(v) + } + } +} + +func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, tryRecognized, chkAll bool) { + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. + var rvp reflect.Value + var rvpValid bool + if rv.Kind() == reflect.Ptr { + rvpValid = true + for { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + if rv.Kind() != reflect.Ptr { + break + } + } + } + + if useLookupRecognizedTypes && tryRecognized && isRecognizedRtid(rv2rtid(rv)) { + if rvpValid { + d.decode(rv2i(rvp)) + return + } else if rv.CanAddr() { + d.decode(rv2i(rv.Addr())) + return + } + } + + if fn == nil { + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = d.cf.get(rv.Type(), chkAll, true) // chkAll, chkAll) + } + if fn.i.addr { + if rvpValid { + fn.fd(d, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fd(d, &fn.i, rv.Addr()) + } else { + fn.fd(d, &fn.i, rv) + } + } else { + fn.fd(d, &fn.i, rv) + } + // return rv +} + +func (d *Decoder) structFieldNotFound(index int, rvkencname string) { + // NOTE: rvkencname may be a stringView, so don't pass it to another function. + if d.h.ErrorIfNoField { + if index >= 0 { + d.errorf("no matching struct field found when decoding stream array at index %v", index) + return + } else if rvkencname != "" { + d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) + return + } + } + d.swallow() +} + +func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { + if d.h.ErrorIfNoArrayExpand { + d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) + } +} + +func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) { + switch rv.Kind() { + case reflect.Array: + return rv, true + case reflect.Ptr: + if !rv.IsNil() { + return rv.Elem(), true + } + case reflect.Slice, reflect.Chan, reflect.Map: + if !rv.IsNil() { + return rv, true + } + } + return +} + +func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) { + // decode can take any reflect.Value that is a inherently addressable i.e. 
+ // - array + // - non-nil chan (we will SEND to it) + // - non-nil slice (we will set its elements) + // - non-nil map (we will put into it) + // - non-nil pointer (we can "update" it) + rv2, canDecode := isDecodeable(rv) + if canDecode { + return + } + if !rv.IsValid() { + d.error(cannotDecodeIntoNilErr) + return + } + if !rv.CanInterface() { + d.errorf("cannot decode into a value without an interface: %v", rv) + return + } + rvi := rv2i(rv) + d.errorf("cannot decode into value of kind: %v, type: %T, %v", rv.Kind(), rvi, rvi) + return +} + +// func (d *Decoder) chkPtrValue(rv reflect.Value) { +// // We can only decode into a non-nil pointer +// if rv.Kind() == reflect.Ptr && !rv.IsNil() { +// return +// } +// d.errNotValidPtrValue(rv) +// } + +// func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { +// if !rv.IsValid() { +// d.error(cannotDecodeIntoNilErr) +// return +// } +// if !rv.CanInterface() { +// d.errorf("cannot decode into a value without an interface: %v", rv) +// return +// } +// rvi := rv2i(rv) +// d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) +// } + +func (d *Decoder) error(err error) { + panic(err) +} + +func (d *Decoder) errorf(format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = d.r.numread() + copy(params2[1:], params) + err := fmt.Errorf("[pos %d]: "+format, params2...) + panic(err) +} + +// Possibly get an interned version of a string +// +// This should mostly be used for map keys, where the key type is string. +// This is because keys of a map/struct are typically reused across many objects. +func (d *Decoder) string(v []byte) (s string) { + if d.is == nil { + return string(v) // don't return stringView, as we need a real string here. + } + s, ok := d.is[string(v)] // no allocation here, per go implementation + if !ok { + s = string(v) // new allocation here + d.is[s] = s + } + return s +} + +// nextValueBytes returns the next value in the stream as a set of bytes. +func (d *Decoder) nextValueBytes() (bs []byte) { + d.d.uncacheRead() + d.r.track() + d.swallow() + bs = d.r.stopTrack() + return +} + +func (d *Decoder) rawBytes() []byte { + // ensure that this is not a view into the bytes + // i.e. make new copy always. + bs := d.nextValueBytes() + bs2 := make([]byte, len(bs)) + copy(bs2, bs) + return bs2 +} + +// -------------------------------------------------- + +// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. +// A slice can be set from a map or array in stream. This supports the MapBySlice interface. 
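+// Illustrative example: when the stream holds a map, decSliceHelperStart reports
+// clen = 2*mapLen and elements alternate key/value, so a stream map {"a": 1}
+// decoded into a []interface{} yields ["a", 1].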
+type decSliceHelper struct { + d *Decoder + // ct valueType + array bool +} + +func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { + dd := d.d + ctyp := dd.ContainerType() + if ctyp == valueTypeArray { + x.array = true + clen = dd.ReadArrayStart() + } else if ctyp == valueTypeMap { + clen = dd.ReadMapStart() * 2 + } else { + d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) + } + // x.ct = ctyp + x.d = d + return +} + +func (x decSliceHelper) End() { + if x.array { + x.d.d.ReadArrayEnd() + } else { + x.d.d.ReadMapEnd() + } +} + +func (x decSliceHelper) ElemContainerState(index int) { + if x.array { + x.d.d.ReadArrayElem() + } else { + if index%2 == 0 { + x.d.d.ReadMapElemKey() + } else { + x.d.d.ReadMapElemValue() + } + } +} + +func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { + if clen == 0 { + return zeroByteSlice + } + if len(bs) == clen { + bsOut = bs + r.readb(bsOut) + } else if cap(bs) >= clen { + bsOut = bs[:clen] + r.readb(bsOut) + } else { + // bsOut = make([]byte, clen) + len2 := decInferLen(clen, maxInitLen, 1) + bsOut = make([]byte, len2) + r.readb(bsOut) + for len2 < clen { + len3 := decInferLen(clen-len2, maxInitLen, 1) + bs3 := bsOut + bsOut = make([]byte, len2+len3) + copy(bsOut, bs3) + r.readb(bsOut[len2:]) + len2 += len3 + } + } + return +} + +func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { + if xlen := len(in); xlen > 0 { + if isBytesReader || xlen <= scratchByteArrayLen { + if cap(dest) >= xlen { + out = dest[:xlen] + } else { + out = make([]byte, xlen) + } + copy(out, in) + return + } + } + return in +} + +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen, maxlen, unit int) (rvlen int) { + // handle when maxlen is not set i.e. <= 0 + if clen <= 0 { + return + } + if maxlen <= 0 { + // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
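+		// Worked example: with unit=1 (bytes), maxlen becomes 256*1024; with unit >= 64
+		// it becomes 4*1024. So a stream claiming 2^30 elements is provisioned at most
+		// maxlen entries up front and grown further only as data actually arrives.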
+ // maxlen = 256 * 1024 / unit + // if maxlen < (4 * 1024) { + // maxlen = 4 * 1024 + // } + if unit < (256 / 4) { + maxlen = 256 * 1024 / unit + } else { + maxlen = 4 * 1024 + } + } + if clen > maxlen { + rvlen = maxlen + } else { + rvlen = clen + } + return +} + +func decExpandSliceRV(s reflect.Value, st reflect.Type, stElemSize, num, slen, scap int) ( + s2 reflect.Value, scap2 int, changed bool) { + l1 := slen + num // new slice length + if l1 < slen { + panic("expandSlice: slice overflow") + } + if l1 <= scap { + if s.CanSet() { + s.SetLen(l1) + } else { + s2 = s.Slice(0, l1) + scap2 = scap + changed = true + } + return + } + scap2 = growCap(scap, stElemSize, num) + s2 = reflect.MakeSlice(st, l1, scap2) + changed = true + reflect.Copy(s2, s) + return +} + +func decReadFull(r io.Reader, bs []byte) (n int, err error) { + var nn int + for n < len(bs) && err == nil { + nn, err = r.Read(bs[n:]) + if nn > 0 { + if err == io.EOF { + // leave EOF for next time + err = nil + } + n += nn + } + } + + // do not do this - it serves no purpose + // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } + return +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/encode.go new file mode 100644 index 00000000..dd15a2f4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/encode.go @@ -0,0 +1,1414 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bufio" + "encoding" + "fmt" + "io" + "reflect" + "sort" + "sync" +) + +const defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 + +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 + +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota + + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracts writing to a byte array or to an io.Writer. 
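+// Two implementations follow: ioEncWriter, which wraps an io.Writer (flushed at
+// atEndOfEncode when buffered), and bytesEncWriter, which appends into a byte slice
+// and publishes it via *out at atEndOfEncode.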
+type encWriter interface { + writeb([]byte) + writestr(string) + writen1(byte) + writen2(byte, byte) + writen4(byte, byte, byte, byte) + writen5(byte, byte, byte, byte, byte) + atEndOfEncode() +} + +// encDriver abstracts the actual codec (binc vs msgpack, etc) +type encDriver interface { + // IsBuiltinType(rt uintptr) bool + EncodeBuiltin(rt uintptr, v interface{}) + EncodeNil() + EncodeInt(i int64) + EncodeUint(i uint64) + EncodeBool(b bool) + EncodeFloat32(f float32) + EncodeFloat64(f float64) + // encodeExtPreamble(xtag byte, length int) + EncodeRawExt(re *RawExt, e *Encoder) + EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) + WriteArrayStart(length int) + WriteArrayElem() + WriteArrayEnd() + WriteMapStart(length int) + WriteMapElemKey() + WriteMapElemValue() + WriteMapEnd() + EncodeString(c charEncoding, v string) + EncodeSymbol(v string) + EncodeStringBytes(c charEncoding, v []byte) + //TODO + //encBignum(f *big.Int) + //encStringRunes(c charEncoding, v []rune) + + reset() + atEndOfEncode() +} + +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} + +type ioEncFlusher interface { + Flush() error +} + +type encDriverAsis interface { + EncodeAsis(v []byte) +} + +// type encNoSeparator struct{} +// func (_ encNoSeparator) EncodeEnd() {} + +type encDriverNoopContainerWriter struct{} + +func (_ encDriverNoopContainerWriter) WriteArrayStart(length int) {} +func (_ encDriverNoopContainerWriter) WriteArrayElem() {} +func (_ encDriverNoopContainerWriter) WriteArrayEnd() {} +func (_ encDriverNoopContainerWriter) WriteMapStart(length int) {} +func (_ encDriverNoopContainerWriter) WriteMapElemKey() {} +func (_ encDriverNoopContainerWriter) WriteMapElemValue() {} +func (_ encDriverNoopContainerWriter) WriteMapEnd() {} +func (_ encDriverNoopContainerWriter) atEndOfEncode() {} + +// type ioEncWriterWriter interface { +// WriteByte(c byte) error +// WriteString(s string) (n int, err error) +// Write(p []byte) (n int, err error) +// } + +type EncodeOptions struct { + // Encode a struct as an array, and not as a map + StructToArray bool + + // Canonical representation means that encoding a value will always result in the same + // sequence of bytes. + // + // This only affects maps, as the iteration order for maps is random. + // + // The implementation MAY use the natural sort order for the map keys if possible: + // + // - If there is a natural sort order (ie for number, bool, string or []byte keys), + // then the map keys are first sorted in natural order and then written + // with corresponding map values to the strema. + // - If there is no natural sort order, then the map keys will first be + // encoded into []byte, and then sorted, + // before writing the sorted keys and the corresponding map values to the stream. + // + Canonical bool + + // CheckCircularRef controls whether we check for circular references + // and error fast during an encode. + // + // If enabled, an error is received if a pointer to a struct + // references itself either directly or through one of its fields (iteratively). + // + // This is opt-in, as there may be a performance hit to checking circular references. + CheckCircularRef bool + + // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers + // when checking if a value is empty. + // + // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls. + RecursiveEmptyCheck bool + + // Raw controls whether we encode Raw values. 
+ // This is a "dangerous" option and must be explicitly set. + // If set, we blindly encode Raw values as-is, without checking + // if they are a correct representation of a value in that format. + // If unset, we error out. + Raw bool + + // AsSymbols defines what should be encoded as symbols. + // + // Encoding as symbols can reduce the encoded size significantly. + // + // However, during decoding, each string to be encoded as a symbol must + // be checked to see if it has been seen before. Consequently, encoding time + // will increase if using symbols, because string comparisons has a clear cost. + // + // Sample values: + // AsSymbolNone + // AsSymbolAll + // AsSymbolMapStringKeys + // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + AsSymbols AsSymbolFlag + + // WriterBufferSize is the size of the buffer used when writing. + // + // if > 0, we use a smart buffer internally for performance purposes. + WriterBufferSize int +} + +// --------------------------------------------- + +type simpleIoEncWriter struct { + io.Writer +} + +// type bufIoEncWriter struct { +// w io.Writer +// buf []byte +// err error +// } + +// func (x *bufIoEncWriter) Write(b []byte) (n int, err error) { +// if x.err != nil { +// return 0, x.err +// } +// if cap(x.buf)-len(x.buf) >= len(b) { +// x.buf = append(x.buf, b) +// return len(b), nil +// } +// n, err = x.w.Write(x.buf) +// if err != nil { +// x.err = err +// return 0, x.err +// } +// n, err = x.w.Write(b) +// x.err = err +// return +// } + +// ioEncWriter implements encWriter and can write to an io.Writer implementation +type ioEncWriter struct { + w io.Writer + ww io.Writer + bw io.ByteWriter + sw ioEncStringWriter + fw ioEncFlusher + b [8]byte +} + +func (z *ioEncWriter) WriteByte(b byte) (err error) { + // x.bs[0] = b + // _, err = x.ww.Write(x.bs[:]) + z.b[0] = b + _, err = z.w.Write(z.b[:1]) + return +} + +func (z *ioEncWriter) WriteString(s string) (n int, err error) { + return z.w.Write(bytesView(s)) +} + +func (z *ioEncWriter) writeb(bs []byte) { + // if len(bs) == 0 { + // return + // } + if _, err := z.ww.Write(bs); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writestr(s string) { + // if len(s) == 0 { + // return + // } + if _, err := z.sw.WriteString(s); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.bw.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1, b2 byte) { + var err error + if err = z.bw.WriteByte(b1); err == nil { + if err = z.bw.WriteByte(b2); err == nil { + return + } + } + panic(err) +} + +func (z *ioEncWriter) writen4(b1, b2, b3, b4 byte) { + z.b[0], z.b[1], z.b[2], z.b[3] = b1, b2, b3, b4 + if _, err := z.ww.Write(z.b[:4]); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) { + z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5 + if _, err := z.ww.Write(z.b[:5]); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) atEndOfEncode() { + if z.fw != nil { + z.fw.Flush() + } +} + +// ---------------------------------------- + +// bytesEncWriter implements encWriter and can write to an byte slice. +// It is used by Marshal function. 
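// Editor's sketch (not part of the vendored file): the options documented above
// are not set on the Encoder itself; they are fields of the Handle, which embeds
// EncodeOptions. A typical configuration, assuming the exported CborHandle and
// NewEncoder declared in this package (the function name below is illustrative):
func exampleConfigureEncodeOptions(w io.Writer) error {
	var h CborHandle              // any format Handle embeds EncodeOptions
	h.StructToArray = false       // keep structs encoded as maps
	h.AsSymbols = AsSymbolDefault // struct field names as symbols where supported
	h.WriterBufferSize = 1 << 12  // let the Encoder buffer writes internally

	enc := NewEncoder(w, &h)
	return enc.Encode(map[string]int{"a": 1, "b": 2})
}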
+type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode +} + +func (z *bytesEncWriter) writeb(s []byte) { + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) +} + +func (z *bytesEncWriter) writestr(s string) { + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) +} + +func (z *bytesEncWriter) writen1(b1 byte) { + oc, a := z.growNoAlloc(1) + if a { + z.growAlloc(1, oc) + } + z.b[oc] = b1 +} + +func (z *bytesEncWriter) writen2(b1, b2 byte) { + oc, a := z.growNoAlloc(2) + if a { + z.growAlloc(2, oc) + } + z.b[oc+1] = b2 + z.b[oc] = b1 +} + +func (z *bytesEncWriter) writen4(b1, b2, b3, b4 byte) { + oc, a := z.growNoAlloc(4) + if a { + z.growAlloc(4, oc) + } + z.b[oc+3] = b4 + z.b[oc+2] = b3 + z.b[oc+1] = b2 + z.b[oc] = b1 +} + +func (z *bytesEncWriter) writen5(b1, b2, b3, b4, b5 byte) { + oc, a := z.growNoAlloc(5) + if a { + z.growAlloc(5, oc) + } + z.b[oc+4] = b5 + z.b[oc+3] = b4 + z.b[oc+2] = b3 + z.b[oc+1] = b2 + z.b[oc] = b1 +} + +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] +} + +// have a growNoalloc(n int), which can be inlined. +// if allocation is needed, then call growAlloc(n int) + +func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) { + oldcursor = z.c + z.c = z.c + n + if z.c > len(z.b) { + if z.c > cap(z.b) { + allocNeeded = true + } else { + z.b = z.b[:cap(z.b)] + } + } + return +} + +func (z *bytesEncWriter) growAlloc(n int, oldcursor int) { + // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. + // bytes.Buffer model (2*cap + n): much better + // bs := make([]byte, 2*cap(z.b)+n) + bs := make([]byte, growCap(cap(z.b), 1, n)) + copy(bs, z.b[:oldcursor]) + z.b = bs +} + +// --------------------------------------------- + +func (e *Encoder) builtin(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBuiltin(f.ti.rtid, rv2i(rv)) +} + +func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { + // rev := rv2i(rv).(RawExt) + // e.e.EncodeRawExt(&rev, e) + var re *RawExt + if rv.CanAddr() { + re = rv2i(rv.Addr()).(*RawExt) + } else { + rev := rv2i(rv).(RawExt) + re = &rev + } + e.e.EncodeRawExt(re, e) +} + +func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { + // if this is a struct|array and it was addressable, then pass the address directly (not the value) + if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { + rv = rv.Addr() + } + e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e) +} + +func (e *Encoder) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) { + if indir == 0 { + v = rv2i(rv) + } else if indir == -1 { + // If a non-pointer was passed to Encode(), then that value is not addressable. + // Take addr if addressable, else copy value to an addressable value. 
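// Editor's note (not part of the vendored file): a worked illustration of the
// "bytes.Buffer model (2*cap + n)" growth rule referenced above. A 64-byte
// buffer asked to hold 200 more bytes grows once to capacity 2*64 + 200 = 328,
// instead of doubling repeatedly. The vendored code uses growCap (helper.go),
// which may round differently; this sketch only mirrors the model named above
// (the function name is illustrative):
func exampleGrowBytesBufferModel(buf []byte, n int) []byte {
	if len(buf)+n <= cap(buf) {
		return buf[:len(buf)+n] // fits within current capacity: just extend
	}
	grown := make([]byte, len(buf)+n, 2*cap(buf)+n) // one allocation, 2*cap + n
	copy(grown, buf)
	return grown
}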
+ if rv.CanAddr() { + v = rv2i(rv.Addr()) + } else { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2i(rv2) + } + } else { + for j := int8(0); j < indir; j++ { + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + } + v = rv2i(rv) + } + return v, true +} + +func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { + if v, proceed := e.getValueForMarshalInterface(rv, f.ti.csIndir); proceed { + v.(Selfer).CodecEncodeSelf(e) + } +} + +func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { + if v, proceed := e.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed { + bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary() + e.marshal(bs, fnerr, false, c_RAW) + } +} + +func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { + if v, proceed := e.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed { + bs, fnerr := v.(encoding.TextMarshaler).MarshalText() + e.marshal(bs, fnerr, false, c_UTF8) + } +} + +func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { + if v, proceed := e.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed { + bs, fnerr := v.(jsonMarshaler).MarshalJSON() + e.marshal(bs, fnerr, true, c_UTF8) + } +} + +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBool(rv.Bool()) +} + +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeString(c_UTF8, rv.String()) +} + +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rv.Float()) +} + +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(float32(rv.Float())) +} + +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeNil() +} + +func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) { + e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +} + +func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { + ti := f.ti + ee := e.e + // array may be non-addressable, so we have to manage with care + // (don't call rv.Bytes, rv.Slice, etc). + // E.g. type struct S{B [2]byte}; + // Encode(S{}) will bomb on "panic: slice of unaddressable array". + if f.seq != seqTypeArray { + if rv.IsNil() { + ee.EncodeNil() + return + } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if ti.rtid == uint8SliceTypId { + ee.EncodeStringBytes(c_RAW, rv.Bytes()) + return + } + } + elemsep := e.hh.hasElemSeparators() + rtelem := ti.rt.Elem() + l := rv.Len() + if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 { + switch f.seq { + case seqTypeArray: + if rv.CanAddr() { + ee.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) + } else { + var bs []byte + if l <= cap(e.b) { + bs = e.b[:l] + } else { + bs = make([]byte, l) + } + reflect.Copy(reflect.ValueOf(bs), rv) + ee.EncodeStringBytes(c_RAW, bs) + } + return + case seqTypeSlice: + ee.EncodeStringBytes(c_RAW, rv.Bytes()) + return + } + } + if ti.rtid == uint8SliceTypId && f.seq == seqTypeChan { + bs := e.b[:0] + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. 
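// Editor's sketch (not part of the vendored file): the unaddressable-array
// handling above is what makes the following safe. Encoding a struct value
// (not a pointer) means its [2]byte field cannot be sliced in place, so the
// bytes are copied into a scratch buffer before being written. Assumes the
// exported SimpleHandle, NewEncoderBytes and MustEncode declared in this
// package (the function name is illustrative):
func exampleEncodeUnaddressableArray() []byte {
	type S struct {
		B [2]byte
	}
	var h SimpleHandle
	var out []byte
	// S{} is passed by value, so S.B is not addressable; the copy fallback
	// above still encodes it as raw bytes rather than panicking.
	NewEncoderBytes(&out, &h).MustEncode(S{B: [2]byte{1, 2}})
	return out
}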
+ // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) } + ch := rv2i(rv).(<-chan byte) + for i := 0; i < l; i++ { + bs = append(bs, <-ch) + } + ee.EncodeStringBytes(c_RAW, bs) + return + } + + if ti.mbs { + if l%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", l) + return + } + ee.WriteMapStart(l / 2) + } else { + ee.WriteArrayStart(l) + } + + if l > 0 { + var fn *codecFn + var recognizedVtyp = useLookupRecognizedTypes && isRecognizedRtidOrPtr(rt2id(rtelem)) + if !recognizedVtyp { + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. + if rtelem.Kind() != reflect.Interface { + fn = e.cf.get(rtelem, true, true) + } + } + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + for j := 0; j < l; j++ { + if elemsep { + if ti.mbs { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } else { + ee.WriteArrayElem() + } + } + if f.seq == seqTypeChan { + if rv2, ok2 := rv.Recv(); ok2 { + if useLookupRecognizedTypes && recognizedVtyp { + e.encode(rv2i(rv2)) + } else { + e.encodeValue(rv2, fn, true) + } + } else { + ee.EncodeNil() // WE HAVE TO DO SOMETHING, so nil if nothing received. + } + } else { + if useLookupRecognizedTypes && recognizedVtyp { + e.encode(rv2i(rv.Index(j))) + } else { + e.encodeValue(rv.Index(j), fn, true) + } + } + } + } + + if ti.mbs { + ee.WriteMapEnd() + } else { + ee.WriteArrayEnd() + } +} + +func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + elemsep := e.hh.hasElemSeparators() + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + if toMap { + tisfi = fti.sfi + } + ee := e.e + + sfn := structFieldNode{v: rv, update: false} + if toMap { + ee.WriteMapStart(len(tisfi)) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + if !elemsep { + for _, si := range tisfi { + if asSymbols { + ee.EncodeSymbol(si.encName) + } else { + ee.EncodeString(c_UTF8, si.encName) + } + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + ee.WriteMapElemKey() + if asSymbols { + ee.EncodeSymbol(si.encName) + } else { + ee.EncodeString(c_UTF8, si.encName) + } + ee.WriteMapElemValue() + e.encodeValue(sfn.field(si), nil, true) + } + } + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(len(tisfi)) + if !elemsep { + for _, si := range tisfi { + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + ee.WriteArrayElem() + e.encodeValue(sfn.field(si), nil, true) + } + } + ee.WriteArrayEnd() + } +} + +func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + elemsep := e.hh.hasElemSeparators() + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfi + } + newlen := len(fti.sfi) + ee := e.e + + // Use sync.Pool to reduce allocating slices unnecessarily. + // The cost of sync.Pool is less than the cost of new allocation. + // + // Each element of the array pools one of encStructPool(8|16|32|64). + // It allows the re-use of slices up to 64 in length. 
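// Editor's sketch (not part of the vendored file): the ti.mbs branch above is
// the MapBySlice behaviour mentioned in the Encode documentation further down.
// A slice type carrying the MapBySlice marker method is written as a map,
// pairing element 0 with 1, 2 with 3, and so on, which is why an odd length is
// rejected above. Assumes the exported MapBySlice interface, MsgpackHandle and
// NewEncoderBytes from this package (pairList and the function name are
// illustrative):
type pairList []interface{}

func (pairList) MapBySlice() {}

func exampleEncodeMapBySlice() []byte {
	var h MsgpackHandle
	var out []byte
	// Encoded as the 2-entry map {"a": 1, "b": 2}, not as a 4-element array.
	NewEncoderBytes(&out, &h).MustEncode(pairList{"a", 1, "b", 2})
	return out
}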
+ // A performance cost of encoding structs was collecting + // which values were empty and should be omitted. + // We needed slices of reflect.Value and string to collect them. + // This shared pool reduces the amount of unnecessary creation we do. + // The cost is that of locking sometimes, but sync.Pool is efficient + // enough to reduce thread contention. + + var spool *sync.Pool + var poolv interface{} + var fkvs []stringRv + if newlen <= 8 { + spool, poolv = pool.stringRv8() + fkvs = poolv.(*[8]stringRv)[:newlen] + } else if newlen <= 16 { + spool, poolv = pool.stringRv16() + fkvs = poolv.(*[16]stringRv)[:newlen] + } else if newlen <= 32 { + spool, poolv = pool.stringRv32() + fkvs = poolv.(*[32]stringRv)[:newlen] + } else if newlen <= 64 { + spool, poolv = pool.stringRv64() + fkvs = poolv.(*[64]stringRv)[:newlen] + } else if newlen <= 128 { + spool, poolv = pool.stringRv128() + fkvs = poolv.(*[128]stringRv)[:newlen] + } else { + fkvs = make([]stringRv, newlen) + } + + newlen = 0 + var kv stringRv + recur := e.h.RecursiveEmptyCheck + sfn := structFieldNode{v: rv, update: false} + for _, si := range tisfi { + // kv.r = si.field(rv, false) + kv.r = sfn.field(si) + if toMap { + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { + continue + } + kv.v = si.encName + } else { + // use the zero value. + // if a reference or struct, set to nil (so you do not output too much) + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { + switch kv.r.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: + kv.r = reflect.Value{} //encode as nil + } + } + } + fkvs[newlen] = kv + newlen++ + } + + if toMap { + ee.WriteMapStart(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + if !elemsep { + for j := 0; j < newlen; j++ { + kv = fkvs[j] + if asSymbols { + ee.EncodeSymbol(kv.v) + } else { + ee.EncodeString(c_UTF8, kv.v) + } + e.encodeValue(kv.r, nil, true) + } + } else { + for j := 0; j < newlen; j++ { + kv = fkvs[j] + ee.WriteMapElemKey() + if asSymbols { + ee.EncodeSymbol(kv.v) + } else { + ee.EncodeString(c_UTF8, kv.v) + } + ee.WriteMapElemValue() + e.encodeValue(kv.r, nil, true) + } + } + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(newlen) + if !elemsep { + for j := 0; j < newlen; j++ { + e.encodeValue(fkvs[j].r, nil, true) + } + } else { + for j := 0; j < newlen; j++ { + ee.WriteArrayElem() + e.encodeValue(fkvs[j].r, nil, true) + } + } + ee.WriteArrayEnd() + } + + // do not use defer. Instead, use explicit pool return at end of function. + // defer has a cost we are trying to avoid. + // If there is a panic and these slices are not returned, it is ok. + if spool != nil { + spool.Put(poolv) + } +} + +func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { + ee := e.e + if rv.IsNil() { + ee.EncodeNil() + return + } + + l := rv.Len() + ee.WriteMapStart(l) + elemsep := e.hh.hasElemSeparators() + if l == 0 { + ee.WriteMapEnd() + return + } + var asSymbols bool + // determine the underlying key and val encFn's for the map. + // This eliminates some work which is done for each loop iteration i.e. + // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. + // + // However, if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
+ var keyFn, valFn *codecFn + ti := f.ti + rtkey0 := ti.rt.Key() + rtkey := rtkey0 + rtval0 := ti.rt.Elem() + rtval := rtval0 + rtkeyid := rt2id(rtkey0) + rtvalid := rt2id(rtval0) + for rtval.Kind() == reflect.Ptr { + rtval = rtval.Elem() + } + if rtval.Kind() != reflect.Interface { + valFn = e.cf.get(rtval, true, true) + } + mks := rv.MapKeys() + + if e.h.Canonical { + e.kMapCanonical(rtkey, rv, mks, valFn, asSymbols) + ee.WriteMapEnd() + return + } + + var recognizedKtyp, recognizedVtyp bool + var keyTypeIsString = rtkeyid == stringTypId + if keyTypeIsString { + asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + } else { + if useLookupRecognizedTypes { + if recognizedKtyp = isRecognizedRtidOrPtr(rtkeyid); recognizedKtyp { + goto LABEL1 + } + } + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + rtkeyid = rt2id(rtkey) + keyFn = e.cf.get(rtkey, true, true) + } + } + + // for j, lmks := 0, len(mks); j < lmks; j++ { +LABEL1: + recognizedVtyp = useLookupRecognizedTypes && isRecognizedRtidOrPtr(rtvalid) + for j := range mks { + if elemsep { + ee.WriteMapElemKey() + } + if keyTypeIsString { + if asSymbols { + ee.EncodeSymbol(mks[j].String()) + } else { + ee.EncodeString(c_UTF8, mks[j].String()) + } + } else if useLookupRecognizedTypes && recognizedKtyp { + e.encode(rv2i(mks[j])) + } else { + e.encodeValue(mks[j], keyFn, true) + } + if elemsep { + ee.WriteMapElemValue() + } + if useLookupRecognizedTypes && recognizedVtyp { + e.encode(rv2i(rv.MapIndex(mks[j]))) + } else { + e.encodeValue(rv.MapIndex(mks[j]), valFn, true) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn, asSymbols bool) { + ee := e.e + elemsep := e.hh.hasElemSeparators() + // we previously did out-of-band if an extension was registered. + // This is not necessary, as the natural kind is sufficient for ordering. + + // WHAT IS THIS? 
rtkeyid can never be a []uint8, per spec + // if rtkeyid == uint8SliceTypId { + // mksv := make([]bytesRv, len(mks)) + // for i, k := range mks { + // v := &mksv[i] + // v.r = k + // v.v = k.Bytes() + // } + // sort.Sort(bytesRvSlice(mksv)) + // for i := range mksv { + // if elemsep { + // ee.WriteMapElemKey() + // } + // ee.EncodeStringBytes(c_RAW, mksv[i].v) + // if elemsep { + // ee.WriteMapElemValue() + // } + // e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + // } + // return + // } + + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bool() + } + sort.Sort(boolRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeBool(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(mksv[i].v) + } else { + ee.EncodeString(c_UTF8, mksv[i].v) + } + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeUint(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeInt(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(mksv[i].v)) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + default: + // out-of-band + // first encode each key to a []byte first, then sort them, then record + var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + mksbv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + e2.MustEncode(k) + v.r = k + v.v = mksv[l:] + } + sort.Sort(bytesRvSlice(mksbv)) + for j := range mksbv { + if elemsep { + ee.WriteMapElemKey() + } + e.asis(mksbv[j].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true) + } + } +} + +// // -------------------------------------------------- 
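// Editor's sketch (not part of the vendored file): what the canonical path
// above buys you. With Canonical set on the handle, map keys are sorted before
// being written, so encoding the same map twice yields byte-identical output.
// Assumes the exported JsonHandle, NewEncoderBytes and MustEncode declared in
// this package (the function name is illustrative):
func exampleCanonicalEncodingIsDeterministic() bool {
	var h JsonHandle
	h.Canonical = true

	m := map[string]int{"b": 2, "a": 1, "c": 3}
	var out1, out2 []byte
	NewEncoderBytes(&out1, &h).MustEncode(m)
	NewEncoderBytes(&out2, &h).MustEncode(m)

	// Without Canonical, random map iteration order could make these differ.
	return string(out1) == string(out2)
}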
+ +// An Encoder writes an object to an output stream in the codec format. +type Encoder struct { + // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder + e encDriver + // NOTE: Encoder shouldn't call it's write methods, + // as the handler MAY need to do some coordination. + w encWriter + + hh Handle + h *BasicHandle + + // ---- cpu cache line boundary? + + wi ioEncWriter + wb bytesEncWriter + bw bufio.Writer + + // cr containerStateRecv + as encDriverAsis + // ---- cpu cache line boundary? + + ci set + err error + + b [scratchByteArrayLen]byte + cf codecFner +} + +// NewEncoder returns an Encoder for encoding into an io.Writer. +// +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Writer, bytes.Buffer). +func NewEncoder(w io.Writer, h Handle) *Encoder { + e := newEncoder(h) + e.Reset(w) + return e +} + +// NewEncoderBytes returns an encoder for encoding directly and efficiently +// into a byte slice, using zero-copying to temporary slices. +// +// It will potentially replace the output byte slice pointed to. +// After encoding, the out parameter contains the encoded contents. +func NewEncoderBytes(out *[]byte, h Handle) *Encoder { + e := newEncoder(h) + e.ResetBytes(out) + return e +} + +func newEncoder(h Handle) *Encoder { + e := &Encoder{hh: h, h: h.getBasicHandle()} + e.e = h.newEncDriver(e) + e.as, _ = e.e.(encDriverAsis) + // e.cr, _ = e.e.(containerStateRecv) + return e +} + +// Reset the Encoder with a new output stream. +// +// This accommodates using the state of the Encoder, +// where it has "cached" information about sub-engines. +func (e *Encoder) Reset(w io.Writer) { + var ok bool + e.wi.w = w + if e.h.WriterBufferSize > 0 { + bw := bufio.NewWriterSize(w, e.h.WriterBufferSize) + e.bw = *bw + e.wi.bw = &e.bw + e.wi.sw = &e.bw + e.wi.fw = &e.bw + e.wi.ww = &e.bw + } else { + if e.wi.bw, ok = w.(io.ByteWriter); !ok { + e.wi.bw = &e.wi + } + if e.wi.sw, ok = w.(ioEncStringWriter); !ok { + e.wi.sw = &e.wi + } + e.wi.fw, _ = w.(ioEncFlusher) + e.wi.ww = w + } + e.w = &e.wi + e.e.reset() + e.cf.reset(e.hh) + e.err = nil +} + +func (e *Encoder) ResetBytes(out *[]byte) { + in := *out + if in == nil { + in = make([]byte, defEncByteBufSize) + } + e.wb.b, e.wb.out, e.wb.c = in, out, 0 + e.w = &e.wb + e.e.reset() + e.cf.reset(e.hh) + e.err = nil +} + +// Encode writes an object into a stream. +// +// Encoding can be configured via the struct tag for the fields. +// The "codec" key in struct field's tag value is the key name, +// followed by an optional comma and options. +// Note that the "json" key is used in the absence of the "codec" key. +// +// To set an option on all fields (e.g. omitempty on all fields), you +// can create a field called _struct, and set flags on it. +// +// Struct values "usually" encode as maps. Each exported struct field is encoded unless: +// - the field's tag is "-", OR +// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option. +// +// When encoding as a map, the first string in the tag (before the comma) +// is the map key string to use when encoding. +// +// However, struct values may encode as arrays. This happens when: +// - StructToArray Encode option is set, OR +// - the tag on the _struct field sets the "toarray" option +// Note that omitempty is ignored when encoding struct values as arrays, +// as an entry must be encoded for each field, to maintain its position. +// +// Values with types that implement MapBySlice are encoded as stream maps. 
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline except:
+//   - the struct tag specifies a replacement name (first value)
+//   - the field is of an interface type
+//
+// Examples:
+//
+//	// NOTE: 'json:' can be used as struct tag key, in place of 'codec:' below.
+//	type MyStruct struct {
+//	    _struct bool    `codec:",omitempty"`   //set omitempty for every field
+//	    Field1 string   `codec:"-"`            //skip this field
+//	    Field2 int      `codec:"myName"`       //Use key "myName" in encode stream
+//	    Field3 int32    `codec:",omitempty"`   //use key "Field3". Omit if empty.
+//	    Field4 bool     `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+//	    io.Reader                              //use key "Reader".
+//	    MyStruct        `codec:"my1"`          //use key "my1".
+//	    MyStruct                               //inline it
+//	    ...
+//	}
+//
+//	type MyStruct struct {
+//	    _struct bool    `codec:",toarray"`     //encode struct as an array
+//	}
+//
+// The mode of encoding is based on the type of the value. When a value is seen:
+//   - If a Selfer, call its CodecEncodeSelf method
+//   - If an extension is registered for it, call that extension function
+//   - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method
+//   - Else encode it based on its reflect.Kind
+//
+// Note that struct field names and keys in map[string]XXX will be treated as symbols.
+// Some formats support symbols (e.g. binc) and will properly encode the string
+// only once in the stream, and use a tag to refer to it thereafter.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer panicToErrs2(&e.err, &err)
+	e.MustEncode(v)
+	return
+}
+
+// MustEncode is like Encode, but panics if unable to Encode.
+// This provides insight into the code location that triggered the error.
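// Editor's sketch (not part of the vendored file): a round trip that exercises
// the struct-tag conventions documented above. Keys come from the codec (or
// json) tag, "-" skips a field, and omitempty drops zero values when encoding
// as a map. Assumes the exported JsonHandle, NewEncoderBytes and
// NewDecoderBytes declared in this package (the type and function names are
// illustrative):
func exampleEncodeRoundTrip() (string, error) {
	type Person struct {
		Name string `codec:"name"`
		Age  int    `codec:"age,omitempty"` // omitted below because it is zero
		Note string `codec:"-"`             // never encoded
	}

	var h JsonHandle
	var buf []byte
	if err := NewEncoderBytes(&buf, &h).Encode(Person{Name: "ana"}); err != nil {
		return "", err
	}

	var p Person
	if err := NewDecoderBytes(buf, &h).Decode(&p); err != nil {
		return "", err
	}
	return p.Name, nil // "ana"
}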
+func (e *Encoder) MustEncode(v interface{}) { + if e.err != nil { + panic(e.err) + } + e.encode(v) + e.e.atEndOfEncode() + e.w.atEndOfEncode() +} + +func (e *Encoder) encode(iv interface{}) { + if iv == nil || definitelyNil(iv) { + e.e.EncodeNil() + return + } + if v, ok := iv.(Selfer); ok { + v.CodecEncodeSelf(e) + return + } + + switch v := iv.(type) { + // case nil: + // e.e.EncodeNil() + // case Selfer: + // v.CodecEncodeSelf(e) + case Raw: + e.rawBytes(v) + case reflect.Value: + e.encodeValue(v, nil, true) + + case string: + e.e.EncodeString(c_UTF8, v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + + case []uint8: + e.e.EncodeStringBytes(c_RAW, v) + + case *string: + e.e.EncodeString(c_UTF8, *v) + case *bool: + e.e.EncodeBool(*v) + case *int: + e.e.EncodeInt(int64(*v)) + case *int8: + e.e.EncodeInt(int64(*v)) + case *int16: + e.e.EncodeInt(int64(*v)) + case *int32: + e.e.EncodeInt(int64(*v)) + case *int64: + e.e.EncodeInt(*v) + case *uint: + e.e.EncodeUint(uint64(*v)) + case *uint8: + e.e.EncodeUint(uint64(*v)) + case *uint16: + e.e.EncodeUint(uint64(*v)) + case *uint32: + e.e.EncodeUint(uint64(*v)) + case *uint64: + e.e.EncodeUint(*v) + case *uintptr: + e.e.EncodeUint(uint64(*v)) + case *float32: + e.e.EncodeFloat32(*v) + case *float64: + e.e.EncodeFloat64(*v) + + case *[]uint8: + e.e.EncodeStringBytes(c_RAW, *v) + + default: + if !fastpathEncodeTypeSwitch(iv, e) { + e.encodeValue(reflect.ValueOf(iv), nil, false) + } + } +} + +func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + var sptr uintptr +TOP: + switch rv.Kind() { + case reflect.Ptr: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
+ sptr = rv.UnsafeAddr() + break TOP + } + goto TOP + case reflect.Interface: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + goto TOP + case reflect.Slice, reflect.Map: + if rv.IsNil() { + e.e.EncodeNil() + return + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + return + } + + if sptr != 0 && (&e.ci).add(sptr) { + e.errorf("circular reference found: # %d", sptr) + } + + if fn == nil { + rt := rv.Type() + // TODO: calling isRecognizedRtid here is a major slowdown + if false && useLookupRecognizedTypes && isRecognizedRtidOrPtr(rt2id(rt)) { + e.encode(rv2i(rv)) + return + } + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = e.cf.get(rt, checkFastpath, true) + } + fn.fe(e, &fn.i, rv) + if sptr != 0 { + (&e.ci).remove(sptr) + } +} + +func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else if asis { + e.asis(bs) + } else { + e.e.EncodeStringBytes(c, bs) + } +} + +func (e *Encoder) asis(v []byte) { + if e.as == nil { + e.w.writeb(v) + } else { + e.as.EncodeAsis(v) + } +} + +func (e *Encoder) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + e.errorf("Raw values cannot be encoded: %v", v) + } + if e.as == nil { + e.w.writeb(v) + } else { + e.as.EncodeAsis(v) + } +} + +func (e *Encoder) errorf(format string, params ...interface{}) { + err := fmt.Errorf(format, params...) + panic(err) +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.generated.go new file mode 100644 index 00000000..69b11fcf --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.generated.go @@ -0,0 +1,33034 @@ +// +build !notfastpath + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl +// ************************************************************ + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register then in this single file +// so as not to pollute the encode.go and decode.go, and create a dependency in there. +// This file can be omitted without causing a build failure. +// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently support +// - slice of all builtin types, +// - map of all builtin types to string or interface value +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// This should provide adequate "typical" implementations. +// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
+// + +import ( + "reflect" + "sort" +) + +const fastpathEnabled = true + +type fastpathT struct{} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} + +type fastpathA [271]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + h, i, j := 0, 0, 271 // len(x) + for i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < 271 && x[i].rtid == rtid { + return i + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } +func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + i := 0 + fn := func(v interface{}, + fe func(*Encoder, *codecFnInfo, reflect.Value), + fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) { + xrt := reflect.TypeOf(v) + xptr := rt2id(xrt) + if useLookupRecognizedTypes { + recognizedRtids = append(recognizedRtids, xptr) + recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(xrt))) + } + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + return + } + + fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR) + fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR) + fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R) + fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R) + fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR) + fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R) + fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R) + fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R) + fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR) + fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR) + fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R) + fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R) + fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R) + fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R) + fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR) + + fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR) + fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR) + fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR) + fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R) + fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R) + fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R) + fn(map[interface{}]uint64(nil), (*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R) + 
fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR) + fn(map[interface{}]int(nil), (*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR) + fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R) + fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R) + fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, (*Decoder).fastpathDecMapIntfInt32R) + fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R) + fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R) + fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R) + fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR) + fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR) + fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR) + fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R) + fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R) + fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R) + fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R) + fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR) + fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR) + fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R) + fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R) + fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R) + fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R) + fn(map[string]float32(nil), (*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R) + fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR) + fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR) + fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR) + fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR) + fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R) + fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R) + fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R) + fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R) + fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, 
(*Decoder).fastpathDecMapFloat32UintptrR) + fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR) + fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R) + fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R) + fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R) + fn(map[float32]int64(nil), (*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R) + fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R) + fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R) + fn(map[float32]bool(nil), (*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR) + fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR) + fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR) + fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR) + fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, (*Decoder).fastpathDecMapFloat64Uint8R) + fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R) + fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R) + fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R) + fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR) + fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR) + fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R) + fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R) + fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R) + fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R) + fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R) + fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R) + fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR) + fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR) + fn(map[uint]string(nil), (*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR) + fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR) + fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R) + fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R) + fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R) + fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R) + fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, (*Decoder).fastpathDecMapUintUintptrR) + fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, 
(*Decoder).fastpathDecMapUintIntR) + fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R) + fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R) + fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R) + fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R) + fn(map[uint]float32(nil), (*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R) + fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R) + fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR) + fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR) + fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR) + fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, (*Decoder).fastpathDecMapUint8Uint16R) + fn(map[uint8]uint32(nil), (*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R) + fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R) + fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR) + fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR) + fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R) + fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R) + fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R) + fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R) + fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R) + fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR) + fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR) + fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR) + fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR) + fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R) + fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R) + fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R) + fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R) + fn(map[uint16]uintptr(nil), (*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR) + fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR) + fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, (*Decoder).fastpathDecMapUint16Int8R) + fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, 
(*Decoder).fastpathDecMapUint16Int16R) + fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, (*Decoder).fastpathDecMapUint16Int32R) + fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R) + fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R) + fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, (*Decoder).fastpathDecMapUint16Float64R) + fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR) + fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR) + fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR) + fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR) + fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R) + fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R) + fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R) + fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, (*Decoder).fastpathDecMapUint32Uint64R) + fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR) + fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR) + fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R) + fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R) + fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R) + fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R) + fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R) + fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R) + fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR) + fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR) + fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR) + fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R) + fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R) + fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R) + fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR) + fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR) + fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R) + fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, (*Decoder).fastpathDecMapUint64Int16R) + fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, 
(*Decoder).fastpathDecMapUint64Int32R) + fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, (*Decoder).fastpathDecMapUint64Int64R) + fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R) + fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR) + fn(map[uintptr]interface{}(nil), (*Encoder).fastpathEncMapUintptrIntfR, (*Decoder).fastpathDecMapUintptrIntfR) + fn(map[uintptr]string(nil), (*Encoder).fastpathEncMapUintptrStringR, (*Decoder).fastpathDecMapUintptrStringR) + fn(map[uintptr]uint(nil), (*Encoder).fastpathEncMapUintptrUintR, (*Decoder).fastpathDecMapUintptrUintR) + fn(map[uintptr]uint8(nil), (*Encoder).fastpathEncMapUintptrUint8R, (*Decoder).fastpathDecMapUintptrUint8R) + fn(map[uintptr]uint16(nil), (*Encoder).fastpathEncMapUintptrUint16R, (*Decoder).fastpathDecMapUintptrUint16R) + fn(map[uintptr]uint32(nil), (*Encoder).fastpathEncMapUintptrUint32R, (*Decoder).fastpathDecMapUintptrUint32R) + fn(map[uintptr]uint64(nil), (*Encoder).fastpathEncMapUintptrUint64R, (*Decoder).fastpathDecMapUintptrUint64R) + fn(map[uintptr]uintptr(nil), (*Encoder).fastpathEncMapUintptrUintptrR, (*Decoder).fastpathDecMapUintptrUintptrR) + fn(map[uintptr]int(nil), (*Encoder).fastpathEncMapUintptrIntR, (*Decoder).fastpathDecMapUintptrIntR) + fn(map[uintptr]int8(nil), (*Encoder).fastpathEncMapUintptrInt8R, (*Decoder).fastpathDecMapUintptrInt8R) + fn(map[uintptr]int16(nil), (*Encoder).fastpathEncMapUintptrInt16R, (*Decoder).fastpathDecMapUintptrInt16R) + fn(map[uintptr]int32(nil), (*Encoder).fastpathEncMapUintptrInt32R, (*Decoder).fastpathDecMapUintptrInt32R) + fn(map[uintptr]int64(nil), (*Encoder).fastpathEncMapUintptrInt64R, (*Decoder).fastpathDecMapUintptrInt64R) + fn(map[uintptr]float32(nil), (*Encoder).fastpathEncMapUintptrFloat32R, (*Decoder).fastpathDecMapUintptrFloat32R) + fn(map[uintptr]float64(nil), (*Encoder).fastpathEncMapUintptrFloat64R, (*Decoder).fastpathDecMapUintptrFloat64R) + fn(map[uintptr]bool(nil), (*Encoder).fastpathEncMapUintptrBoolR, (*Decoder).fastpathDecMapUintptrBoolR) + fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR) + fn(map[int]uint(nil), (*Encoder).fastpathEncMapIntUintR, (*Decoder).fastpathDecMapIntUintR) + fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R) + fn(map[int]uint16(nil), (*Encoder).fastpathEncMapIntUint16R, (*Decoder).fastpathDecMapIntUint16R) + fn(map[int]uint32(nil), (*Encoder).fastpathEncMapIntUint32R, (*Decoder).fastpathDecMapIntUint32R) + fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R) + fn(map[int]uintptr(nil), (*Encoder).fastpathEncMapIntUintptrR, (*Decoder).fastpathDecMapIntUintptrR) + fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR) + fn(map[int]int8(nil), (*Encoder).fastpathEncMapIntInt8R, (*Decoder).fastpathDecMapIntInt8R) + fn(map[int]int16(nil), (*Encoder).fastpathEncMapIntInt16R, (*Decoder).fastpathDecMapIntInt16R) + fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R) + fn(map[int]int64(nil), (*Encoder).fastpathEncMapIntInt64R, (*Decoder).fastpathDecMapIntInt64R) + fn(map[int]float32(nil), 
(*Encoder).fastpathEncMapIntFloat32R, (*Decoder).fastpathDecMapIntFloat32R) + fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR) + fn(map[int8]interface{}(nil), (*Encoder).fastpathEncMapInt8IntfR, (*Decoder).fastpathDecMapInt8IntfR) + fn(map[int8]string(nil), (*Encoder).fastpathEncMapInt8StringR, (*Decoder).fastpathDecMapInt8StringR) + fn(map[int8]uint(nil), (*Encoder).fastpathEncMapInt8UintR, (*Decoder).fastpathDecMapInt8UintR) + fn(map[int8]uint8(nil), (*Encoder).fastpathEncMapInt8Uint8R, (*Decoder).fastpathDecMapInt8Uint8R) + fn(map[int8]uint16(nil), (*Encoder).fastpathEncMapInt8Uint16R, (*Decoder).fastpathDecMapInt8Uint16R) + fn(map[int8]uint32(nil), (*Encoder).fastpathEncMapInt8Uint32R, (*Decoder).fastpathDecMapInt8Uint32R) + fn(map[int8]uint64(nil), (*Encoder).fastpathEncMapInt8Uint64R, (*Decoder).fastpathDecMapInt8Uint64R) + fn(map[int8]uintptr(nil), (*Encoder).fastpathEncMapInt8UintptrR, (*Decoder).fastpathDecMapInt8UintptrR) + fn(map[int8]int(nil), (*Encoder).fastpathEncMapInt8IntR, (*Decoder).fastpathDecMapInt8IntR) + fn(map[int8]int8(nil), (*Encoder).fastpathEncMapInt8Int8R, (*Decoder).fastpathDecMapInt8Int8R) + fn(map[int8]int16(nil), (*Encoder).fastpathEncMapInt8Int16R, (*Decoder).fastpathDecMapInt8Int16R) + fn(map[int8]int32(nil), (*Encoder).fastpathEncMapInt8Int32R, (*Decoder).fastpathDecMapInt8Int32R) + fn(map[int8]int64(nil), (*Encoder).fastpathEncMapInt8Int64R, (*Decoder).fastpathDecMapInt8Int64R) + fn(map[int8]float32(nil), (*Encoder).fastpathEncMapInt8Float32R, (*Decoder).fastpathDecMapInt8Float32R) + fn(map[int8]float64(nil), (*Encoder).fastpathEncMapInt8Float64R, (*Decoder).fastpathDecMapInt8Float64R) + fn(map[int8]bool(nil), (*Encoder).fastpathEncMapInt8BoolR, (*Decoder).fastpathDecMapInt8BoolR) + fn(map[int16]interface{}(nil), (*Encoder).fastpathEncMapInt16IntfR, (*Decoder).fastpathDecMapInt16IntfR) + fn(map[int16]string(nil), (*Encoder).fastpathEncMapInt16StringR, (*Decoder).fastpathDecMapInt16StringR) + fn(map[int16]uint(nil), (*Encoder).fastpathEncMapInt16UintR, (*Decoder).fastpathDecMapInt16UintR) + fn(map[int16]uint8(nil), (*Encoder).fastpathEncMapInt16Uint8R, (*Decoder).fastpathDecMapInt16Uint8R) + fn(map[int16]uint16(nil), (*Encoder).fastpathEncMapInt16Uint16R, (*Decoder).fastpathDecMapInt16Uint16R) + fn(map[int16]uint32(nil), (*Encoder).fastpathEncMapInt16Uint32R, (*Decoder).fastpathDecMapInt16Uint32R) + fn(map[int16]uint64(nil), (*Encoder).fastpathEncMapInt16Uint64R, (*Decoder).fastpathDecMapInt16Uint64R) + fn(map[int16]uintptr(nil), (*Encoder).fastpathEncMapInt16UintptrR, (*Decoder).fastpathDecMapInt16UintptrR) + fn(map[int16]int(nil), (*Encoder).fastpathEncMapInt16IntR, (*Decoder).fastpathDecMapInt16IntR) + fn(map[int16]int8(nil), (*Encoder).fastpathEncMapInt16Int8R, (*Decoder).fastpathDecMapInt16Int8R) + fn(map[int16]int16(nil), (*Encoder).fastpathEncMapInt16Int16R, (*Decoder).fastpathDecMapInt16Int16R) + fn(map[int16]int32(nil), (*Encoder).fastpathEncMapInt16Int32R, (*Decoder).fastpathDecMapInt16Int32R) + fn(map[int16]int64(nil), (*Encoder).fastpathEncMapInt16Int64R, (*Decoder).fastpathDecMapInt16Int64R) + fn(map[int16]float32(nil), (*Encoder).fastpathEncMapInt16Float32R, (*Decoder).fastpathDecMapInt16Float32R) + fn(map[int16]float64(nil), (*Encoder).fastpathEncMapInt16Float64R, (*Decoder).fastpathDecMapInt16Float64R) + fn(map[int16]bool(nil), (*Encoder).fastpathEncMapInt16BoolR, 
(*Decoder).fastpathDecMapInt16BoolR) + fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR) + fn(map[int32]uint(nil), (*Encoder).fastpathEncMapInt32UintR, (*Decoder).fastpathDecMapInt32UintR) + fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R) + fn(map[int32]uint16(nil), (*Encoder).fastpathEncMapInt32Uint16R, (*Decoder).fastpathDecMapInt32Uint16R) + fn(map[int32]uint32(nil), (*Encoder).fastpathEncMapInt32Uint32R, (*Decoder).fastpathDecMapInt32Uint32R) + fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R) + fn(map[int32]uintptr(nil), (*Encoder).fastpathEncMapInt32UintptrR, (*Decoder).fastpathDecMapInt32UintptrR) + fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR) + fn(map[int32]int8(nil), (*Encoder).fastpathEncMapInt32Int8R, (*Decoder).fastpathDecMapInt32Int8R) + fn(map[int32]int16(nil), (*Encoder).fastpathEncMapInt32Int16R, (*Decoder).fastpathDecMapInt32Int16R) + fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R) + fn(map[int32]int64(nil), (*Encoder).fastpathEncMapInt32Int64R, (*Decoder).fastpathDecMapInt32Int64R) + fn(map[int32]float32(nil), (*Encoder).fastpathEncMapInt32Float32R, (*Decoder).fastpathDecMapInt32Float32R) + fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR) + fn(map[int64]interface{}(nil), (*Encoder).fastpathEncMapInt64IntfR, (*Decoder).fastpathDecMapInt64IntfR) + fn(map[int64]string(nil), (*Encoder).fastpathEncMapInt64StringR, (*Decoder).fastpathDecMapInt64StringR) + fn(map[int64]uint(nil), (*Encoder).fastpathEncMapInt64UintR, (*Decoder).fastpathDecMapInt64UintR) + fn(map[int64]uint8(nil), (*Encoder).fastpathEncMapInt64Uint8R, (*Decoder).fastpathDecMapInt64Uint8R) + fn(map[int64]uint16(nil), (*Encoder).fastpathEncMapInt64Uint16R, (*Decoder).fastpathDecMapInt64Uint16R) + fn(map[int64]uint32(nil), (*Encoder).fastpathEncMapInt64Uint32R, (*Decoder).fastpathDecMapInt64Uint32R) + fn(map[int64]uint64(nil), (*Encoder).fastpathEncMapInt64Uint64R, (*Decoder).fastpathDecMapInt64Uint64R) + fn(map[int64]uintptr(nil), (*Encoder).fastpathEncMapInt64UintptrR, (*Decoder).fastpathDecMapInt64UintptrR) + fn(map[int64]int(nil), (*Encoder).fastpathEncMapInt64IntR, (*Decoder).fastpathDecMapInt64IntR) + fn(map[int64]int8(nil), (*Encoder).fastpathEncMapInt64Int8R, (*Decoder).fastpathDecMapInt64Int8R) + fn(map[int64]int16(nil), (*Encoder).fastpathEncMapInt64Int16R, (*Decoder).fastpathDecMapInt64Int16R) + fn(map[int64]int32(nil), (*Encoder).fastpathEncMapInt64Int32R, (*Decoder).fastpathDecMapInt64Int32R) + fn(map[int64]int64(nil), (*Encoder).fastpathEncMapInt64Int64R, (*Decoder).fastpathDecMapInt64Int64R) + fn(map[int64]float32(nil), (*Encoder).fastpathEncMapInt64Float32R, (*Decoder).fastpathDecMapInt64Float32R) + fn(map[int64]float64(nil), (*Encoder).fastpathEncMapInt64Float64R, (*Decoder).fastpathDecMapInt64Float64R) + fn(map[int64]bool(nil), (*Encoder).fastpathEncMapInt64BoolR, (*Decoder).fastpathDecMapInt64BoolR) + fn(map[bool]interface{}(nil), (*Encoder).fastpathEncMapBoolIntfR, (*Decoder).fastpathDecMapBoolIntfR) + fn(map[bool]string(nil), (*Encoder).fastpathEncMapBoolStringR, 
(*Decoder).fastpathDecMapBoolStringR) + fn(map[bool]uint(nil), (*Encoder).fastpathEncMapBoolUintR, (*Decoder).fastpathDecMapBoolUintR) + fn(map[bool]uint8(nil), (*Encoder).fastpathEncMapBoolUint8R, (*Decoder).fastpathDecMapBoolUint8R) + fn(map[bool]uint16(nil), (*Encoder).fastpathEncMapBoolUint16R, (*Decoder).fastpathDecMapBoolUint16R) + fn(map[bool]uint32(nil), (*Encoder).fastpathEncMapBoolUint32R, (*Decoder).fastpathDecMapBoolUint32R) + fn(map[bool]uint64(nil), (*Encoder).fastpathEncMapBoolUint64R, (*Decoder).fastpathDecMapBoolUint64R) + fn(map[bool]uintptr(nil), (*Encoder).fastpathEncMapBoolUintptrR, (*Decoder).fastpathDecMapBoolUintptrR) + fn(map[bool]int(nil), (*Encoder).fastpathEncMapBoolIntR, (*Decoder).fastpathDecMapBoolIntR) + fn(map[bool]int8(nil), (*Encoder).fastpathEncMapBoolInt8R, (*Decoder).fastpathDecMapBoolInt8R) + fn(map[bool]int16(nil), (*Encoder).fastpathEncMapBoolInt16R, (*Decoder).fastpathDecMapBoolInt16R) + fn(map[bool]int32(nil), (*Encoder).fastpathEncMapBoolInt32R, (*Decoder).fastpathDecMapBoolInt32R) + fn(map[bool]int64(nil), (*Encoder).fastpathEncMapBoolInt64R, (*Decoder).fastpathDecMapBoolInt64R) + fn(map[bool]float32(nil), (*Encoder).fastpathEncMapBoolFloat32R, (*Decoder).fastpathDecMapBoolFloat32R) + fn(map[bool]float64(nil), (*Encoder).fastpathEncMapBoolFloat64R, (*Decoder).fastpathDecMapBoolFloat64R) + fn(map[bool]bool(nil), (*Encoder).fastpathEncMapBoolBoolR, (*Decoder).fastpathDecMapBoolBoolR) + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + + case []interface{}: + fastpathTV.EncSliceIntfV(v, e) + case *[]interface{}: + fastpathTV.EncSliceIntfV(*v, e) + + case map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(v, e) + case *map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(*v, e) + + case map[interface{}]string: + fastpathTV.EncMapIntfStringV(v, e) + case *map[interface{}]string: + fastpathTV.EncMapIntfStringV(*v, e) + + case map[interface{}]uint: + fastpathTV.EncMapIntfUintV(v, e) + case *map[interface{}]uint: + fastpathTV.EncMapIntfUintV(*v, e) + + case map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(v, e) + case *map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(*v, e) + + case map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(v, e) + case *map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(*v, e) + + case map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(v, e) + case *map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(*v, e) + + case map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(v, e) + case *map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(*v, e) + + case map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(v, e) + case *map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(*v, e) + + case map[interface{}]int: + fastpathTV.EncMapIntfIntV(v, e) + case *map[interface{}]int: + fastpathTV.EncMapIntfIntV(*v, e) + + case map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(v, e) + case *map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(*v, e) + + case map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(v, e) + case *map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(*v, e) + + case map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(v, e) + case *map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(*v, e) + + case map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(v, e) + case *map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(*v, e) + + case 
map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(v, e) + case *map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(*v, e) + + case map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(v, e) + case *map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(*v, e) + + case map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(v, e) + case *map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(*v, e) + + case []string: + fastpathTV.EncSliceStringV(v, e) + case *[]string: + fastpathTV.EncSliceStringV(*v, e) + + case map[string]interface{}: + fastpathTV.EncMapStringIntfV(v, e) + case *map[string]interface{}: + fastpathTV.EncMapStringIntfV(*v, e) + + case map[string]string: + fastpathTV.EncMapStringStringV(v, e) + case *map[string]string: + fastpathTV.EncMapStringStringV(*v, e) + + case map[string]uint: + fastpathTV.EncMapStringUintV(v, e) + case *map[string]uint: + fastpathTV.EncMapStringUintV(*v, e) + + case map[string]uint8: + fastpathTV.EncMapStringUint8V(v, e) + case *map[string]uint8: + fastpathTV.EncMapStringUint8V(*v, e) + + case map[string]uint16: + fastpathTV.EncMapStringUint16V(v, e) + case *map[string]uint16: + fastpathTV.EncMapStringUint16V(*v, e) + + case map[string]uint32: + fastpathTV.EncMapStringUint32V(v, e) + case *map[string]uint32: + fastpathTV.EncMapStringUint32V(*v, e) + + case map[string]uint64: + fastpathTV.EncMapStringUint64V(v, e) + case *map[string]uint64: + fastpathTV.EncMapStringUint64V(*v, e) + + case map[string]uintptr: + fastpathTV.EncMapStringUintptrV(v, e) + case *map[string]uintptr: + fastpathTV.EncMapStringUintptrV(*v, e) + + case map[string]int: + fastpathTV.EncMapStringIntV(v, e) + case *map[string]int: + fastpathTV.EncMapStringIntV(*v, e) + + case map[string]int8: + fastpathTV.EncMapStringInt8V(v, e) + case *map[string]int8: + fastpathTV.EncMapStringInt8V(*v, e) + + case map[string]int16: + fastpathTV.EncMapStringInt16V(v, e) + case *map[string]int16: + fastpathTV.EncMapStringInt16V(*v, e) + + case map[string]int32: + fastpathTV.EncMapStringInt32V(v, e) + case *map[string]int32: + fastpathTV.EncMapStringInt32V(*v, e) + + case map[string]int64: + fastpathTV.EncMapStringInt64V(v, e) + case *map[string]int64: + fastpathTV.EncMapStringInt64V(*v, e) + + case map[string]float32: + fastpathTV.EncMapStringFloat32V(v, e) + case *map[string]float32: + fastpathTV.EncMapStringFloat32V(*v, e) + + case map[string]float64: + fastpathTV.EncMapStringFloat64V(v, e) + case *map[string]float64: + fastpathTV.EncMapStringFloat64V(*v, e) + + case map[string]bool: + fastpathTV.EncMapStringBoolV(v, e) + case *map[string]bool: + fastpathTV.EncMapStringBoolV(*v, e) + + case []float32: + fastpathTV.EncSliceFloat32V(v, e) + case *[]float32: + fastpathTV.EncSliceFloat32V(*v, e) + + case map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(v, e) + case *map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(*v, e) + + case map[float32]string: + fastpathTV.EncMapFloat32StringV(v, e) + case *map[float32]string: + fastpathTV.EncMapFloat32StringV(*v, e) + + case map[float32]uint: + fastpathTV.EncMapFloat32UintV(v, e) + case *map[float32]uint: + fastpathTV.EncMapFloat32UintV(*v, e) + + case map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(v, e) + case *map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(*v, e) + + case map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(v, e) + case *map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(*v, e) + + case map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(v, e) + case *map[float32]uint32: + 
fastpathTV.EncMapFloat32Uint32V(*v, e) + + case map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(v, e) + case *map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(*v, e) + + case map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(v, e) + case *map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(*v, e) + + case map[float32]int: + fastpathTV.EncMapFloat32IntV(v, e) + case *map[float32]int: + fastpathTV.EncMapFloat32IntV(*v, e) + + case map[float32]int8: + fastpathTV.EncMapFloat32Int8V(v, e) + case *map[float32]int8: + fastpathTV.EncMapFloat32Int8V(*v, e) + + case map[float32]int16: + fastpathTV.EncMapFloat32Int16V(v, e) + case *map[float32]int16: + fastpathTV.EncMapFloat32Int16V(*v, e) + + case map[float32]int32: + fastpathTV.EncMapFloat32Int32V(v, e) + case *map[float32]int32: + fastpathTV.EncMapFloat32Int32V(*v, e) + + case map[float32]int64: + fastpathTV.EncMapFloat32Int64V(v, e) + case *map[float32]int64: + fastpathTV.EncMapFloat32Int64V(*v, e) + + case map[float32]float32: + fastpathTV.EncMapFloat32Float32V(v, e) + case *map[float32]float32: + fastpathTV.EncMapFloat32Float32V(*v, e) + + case map[float32]float64: + fastpathTV.EncMapFloat32Float64V(v, e) + case *map[float32]float64: + fastpathTV.EncMapFloat32Float64V(*v, e) + + case map[float32]bool: + fastpathTV.EncMapFloat32BoolV(v, e) + case *map[float32]bool: + fastpathTV.EncMapFloat32BoolV(*v, e) + + case []float64: + fastpathTV.EncSliceFloat64V(v, e) + case *[]float64: + fastpathTV.EncSliceFloat64V(*v, e) + + case map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(v, e) + case *map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(*v, e) + + case map[float64]string: + fastpathTV.EncMapFloat64StringV(v, e) + case *map[float64]string: + fastpathTV.EncMapFloat64StringV(*v, e) + + case map[float64]uint: + fastpathTV.EncMapFloat64UintV(v, e) + case *map[float64]uint: + fastpathTV.EncMapFloat64UintV(*v, e) + + case map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(v, e) + case *map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(*v, e) + + case map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(v, e) + case *map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(*v, e) + + case map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(v, e) + case *map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(*v, e) + + case map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(v, e) + case *map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(*v, e) + + case map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(v, e) + case *map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(*v, e) + + case map[float64]int: + fastpathTV.EncMapFloat64IntV(v, e) + case *map[float64]int: + fastpathTV.EncMapFloat64IntV(*v, e) + + case map[float64]int8: + fastpathTV.EncMapFloat64Int8V(v, e) + case *map[float64]int8: + fastpathTV.EncMapFloat64Int8V(*v, e) + + case map[float64]int16: + fastpathTV.EncMapFloat64Int16V(v, e) + case *map[float64]int16: + fastpathTV.EncMapFloat64Int16V(*v, e) + + case map[float64]int32: + fastpathTV.EncMapFloat64Int32V(v, e) + case *map[float64]int32: + fastpathTV.EncMapFloat64Int32V(*v, e) + + case map[float64]int64: + fastpathTV.EncMapFloat64Int64V(v, e) + case *map[float64]int64: + fastpathTV.EncMapFloat64Int64V(*v, e) + + case map[float64]float32: + fastpathTV.EncMapFloat64Float32V(v, e) + case *map[float64]float32: + fastpathTV.EncMapFloat64Float32V(*v, e) + + case map[float64]float64: + fastpathTV.EncMapFloat64Float64V(v, e) + case *map[float64]float64: + fastpathTV.EncMapFloat64Float64V(*v, e) + + 
case map[float64]bool: + fastpathTV.EncMapFloat64BoolV(v, e) + case *map[float64]bool: + fastpathTV.EncMapFloat64BoolV(*v, e) + + case []uint: + fastpathTV.EncSliceUintV(v, e) + case *[]uint: + fastpathTV.EncSliceUintV(*v, e) + + case map[uint]interface{}: + fastpathTV.EncMapUintIntfV(v, e) + case *map[uint]interface{}: + fastpathTV.EncMapUintIntfV(*v, e) + + case map[uint]string: + fastpathTV.EncMapUintStringV(v, e) + case *map[uint]string: + fastpathTV.EncMapUintStringV(*v, e) + + case map[uint]uint: + fastpathTV.EncMapUintUintV(v, e) + case *map[uint]uint: + fastpathTV.EncMapUintUintV(*v, e) + + case map[uint]uint8: + fastpathTV.EncMapUintUint8V(v, e) + case *map[uint]uint8: + fastpathTV.EncMapUintUint8V(*v, e) + + case map[uint]uint16: + fastpathTV.EncMapUintUint16V(v, e) + case *map[uint]uint16: + fastpathTV.EncMapUintUint16V(*v, e) + + case map[uint]uint32: + fastpathTV.EncMapUintUint32V(v, e) + case *map[uint]uint32: + fastpathTV.EncMapUintUint32V(*v, e) + + case map[uint]uint64: + fastpathTV.EncMapUintUint64V(v, e) + case *map[uint]uint64: + fastpathTV.EncMapUintUint64V(*v, e) + + case map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(v, e) + case *map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(*v, e) + + case map[uint]int: + fastpathTV.EncMapUintIntV(v, e) + case *map[uint]int: + fastpathTV.EncMapUintIntV(*v, e) + + case map[uint]int8: + fastpathTV.EncMapUintInt8V(v, e) + case *map[uint]int8: + fastpathTV.EncMapUintInt8V(*v, e) + + case map[uint]int16: + fastpathTV.EncMapUintInt16V(v, e) + case *map[uint]int16: + fastpathTV.EncMapUintInt16V(*v, e) + + case map[uint]int32: + fastpathTV.EncMapUintInt32V(v, e) + case *map[uint]int32: + fastpathTV.EncMapUintInt32V(*v, e) + + case map[uint]int64: + fastpathTV.EncMapUintInt64V(v, e) + case *map[uint]int64: + fastpathTV.EncMapUintInt64V(*v, e) + + case map[uint]float32: + fastpathTV.EncMapUintFloat32V(v, e) + case *map[uint]float32: + fastpathTV.EncMapUintFloat32V(*v, e) + + case map[uint]float64: + fastpathTV.EncMapUintFloat64V(v, e) + case *map[uint]float64: + fastpathTV.EncMapUintFloat64V(*v, e) + + case map[uint]bool: + fastpathTV.EncMapUintBoolV(v, e) + case *map[uint]bool: + fastpathTV.EncMapUintBoolV(*v, e) + + case map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(v, e) + case *map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(*v, e) + + case map[uint8]string: + fastpathTV.EncMapUint8StringV(v, e) + case *map[uint8]string: + fastpathTV.EncMapUint8StringV(*v, e) + + case map[uint8]uint: + fastpathTV.EncMapUint8UintV(v, e) + case *map[uint8]uint: + fastpathTV.EncMapUint8UintV(*v, e) + + case map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(v, e) + case *map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(*v, e) + + case map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(v, e) + case *map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(*v, e) + + case map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(v, e) + case *map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(*v, e) + + case map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(v, e) + case *map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(*v, e) + + case map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(v, e) + case *map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(*v, e) + + case map[uint8]int: + fastpathTV.EncMapUint8IntV(v, e) + case *map[uint8]int: + fastpathTV.EncMapUint8IntV(*v, e) + + case map[uint8]int8: + fastpathTV.EncMapUint8Int8V(v, e) + case *map[uint8]int8: + fastpathTV.EncMapUint8Int8V(*v, e) + + case map[uint8]int16: + fastpathTV.EncMapUint8Int16V(v, e) + 
case *map[uint8]int16: + fastpathTV.EncMapUint8Int16V(*v, e) + + case map[uint8]int32: + fastpathTV.EncMapUint8Int32V(v, e) + case *map[uint8]int32: + fastpathTV.EncMapUint8Int32V(*v, e) + + case map[uint8]int64: + fastpathTV.EncMapUint8Int64V(v, e) + case *map[uint8]int64: + fastpathTV.EncMapUint8Int64V(*v, e) + + case map[uint8]float32: + fastpathTV.EncMapUint8Float32V(v, e) + case *map[uint8]float32: + fastpathTV.EncMapUint8Float32V(*v, e) + + case map[uint8]float64: + fastpathTV.EncMapUint8Float64V(v, e) + case *map[uint8]float64: + fastpathTV.EncMapUint8Float64V(*v, e) + + case map[uint8]bool: + fastpathTV.EncMapUint8BoolV(v, e) + case *map[uint8]bool: + fastpathTV.EncMapUint8BoolV(*v, e) + + case []uint16: + fastpathTV.EncSliceUint16V(v, e) + case *[]uint16: + fastpathTV.EncSliceUint16V(*v, e) + + case map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(v, e) + case *map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(*v, e) + + case map[uint16]string: + fastpathTV.EncMapUint16StringV(v, e) + case *map[uint16]string: + fastpathTV.EncMapUint16StringV(*v, e) + + case map[uint16]uint: + fastpathTV.EncMapUint16UintV(v, e) + case *map[uint16]uint: + fastpathTV.EncMapUint16UintV(*v, e) + + case map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(v, e) + case *map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(*v, e) + + case map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(v, e) + case *map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(*v, e) + + case map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(v, e) + case *map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(*v, e) + + case map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(v, e) + case *map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(*v, e) + + case map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(v, e) + case *map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(*v, e) + + case map[uint16]int: + fastpathTV.EncMapUint16IntV(v, e) + case *map[uint16]int: + fastpathTV.EncMapUint16IntV(*v, e) + + case map[uint16]int8: + fastpathTV.EncMapUint16Int8V(v, e) + case *map[uint16]int8: + fastpathTV.EncMapUint16Int8V(*v, e) + + case map[uint16]int16: + fastpathTV.EncMapUint16Int16V(v, e) + case *map[uint16]int16: + fastpathTV.EncMapUint16Int16V(*v, e) + + case map[uint16]int32: + fastpathTV.EncMapUint16Int32V(v, e) + case *map[uint16]int32: + fastpathTV.EncMapUint16Int32V(*v, e) + + case map[uint16]int64: + fastpathTV.EncMapUint16Int64V(v, e) + case *map[uint16]int64: + fastpathTV.EncMapUint16Int64V(*v, e) + + case map[uint16]float32: + fastpathTV.EncMapUint16Float32V(v, e) + case *map[uint16]float32: + fastpathTV.EncMapUint16Float32V(*v, e) + + case map[uint16]float64: + fastpathTV.EncMapUint16Float64V(v, e) + case *map[uint16]float64: + fastpathTV.EncMapUint16Float64V(*v, e) + + case map[uint16]bool: + fastpathTV.EncMapUint16BoolV(v, e) + case *map[uint16]bool: + fastpathTV.EncMapUint16BoolV(*v, e) + + case []uint32: + fastpathTV.EncSliceUint32V(v, e) + case *[]uint32: + fastpathTV.EncSliceUint32V(*v, e) + + case map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(v, e) + case *map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(*v, e) + + case map[uint32]string: + fastpathTV.EncMapUint32StringV(v, e) + case *map[uint32]string: + fastpathTV.EncMapUint32StringV(*v, e) + + case map[uint32]uint: + fastpathTV.EncMapUint32UintV(v, e) + case *map[uint32]uint: + fastpathTV.EncMapUint32UintV(*v, e) + + case map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(v, e) + case *map[uint32]uint8: + 
fastpathTV.EncMapUint32Uint8V(*v, e) + + case map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(v, e) + case *map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(*v, e) + + case map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(v, e) + case *map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(*v, e) + + case map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(v, e) + case *map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(*v, e) + + case map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(v, e) + case *map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(*v, e) + + case map[uint32]int: + fastpathTV.EncMapUint32IntV(v, e) + case *map[uint32]int: + fastpathTV.EncMapUint32IntV(*v, e) + + case map[uint32]int8: + fastpathTV.EncMapUint32Int8V(v, e) + case *map[uint32]int8: + fastpathTV.EncMapUint32Int8V(*v, e) + + case map[uint32]int16: + fastpathTV.EncMapUint32Int16V(v, e) + case *map[uint32]int16: + fastpathTV.EncMapUint32Int16V(*v, e) + + case map[uint32]int32: + fastpathTV.EncMapUint32Int32V(v, e) + case *map[uint32]int32: + fastpathTV.EncMapUint32Int32V(*v, e) + + case map[uint32]int64: + fastpathTV.EncMapUint32Int64V(v, e) + case *map[uint32]int64: + fastpathTV.EncMapUint32Int64V(*v, e) + + case map[uint32]float32: + fastpathTV.EncMapUint32Float32V(v, e) + case *map[uint32]float32: + fastpathTV.EncMapUint32Float32V(*v, e) + + case map[uint32]float64: + fastpathTV.EncMapUint32Float64V(v, e) + case *map[uint32]float64: + fastpathTV.EncMapUint32Float64V(*v, e) + + case map[uint32]bool: + fastpathTV.EncMapUint32BoolV(v, e) + case *map[uint32]bool: + fastpathTV.EncMapUint32BoolV(*v, e) + + case []uint64: + fastpathTV.EncSliceUint64V(v, e) + case *[]uint64: + fastpathTV.EncSliceUint64V(*v, e) + + case map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(v, e) + case *map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(*v, e) + + case map[uint64]string: + fastpathTV.EncMapUint64StringV(v, e) + case *map[uint64]string: + fastpathTV.EncMapUint64StringV(*v, e) + + case map[uint64]uint: + fastpathTV.EncMapUint64UintV(v, e) + case *map[uint64]uint: + fastpathTV.EncMapUint64UintV(*v, e) + + case map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(v, e) + case *map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(*v, e) + + case map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(v, e) + case *map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(*v, e) + + case map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(v, e) + case *map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(*v, e) + + case map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(v, e) + case *map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(*v, e) + + case map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(v, e) + case *map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(*v, e) + + case map[uint64]int: + fastpathTV.EncMapUint64IntV(v, e) + case *map[uint64]int: + fastpathTV.EncMapUint64IntV(*v, e) + + case map[uint64]int8: + fastpathTV.EncMapUint64Int8V(v, e) + case *map[uint64]int8: + fastpathTV.EncMapUint64Int8V(*v, e) + + case map[uint64]int16: + fastpathTV.EncMapUint64Int16V(v, e) + case *map[uint64]int16: + fastpathTV.EncMapUint64Int16V(*v, e) + + case map[uint64]int32: + fastpathTV.EncMapUint64Int32V(v, e) + case *map[uint64]int32: + fastpathTV.EncMapUint64Int32V(*v, e) + + case map[uint64]int64: + fastpathTV.EncMapUint64Int64V(v, e) + case *map[uint64]int64: + fastpathTV.EncMapUint64Int64V(*v, e) + + case map[uint64]float32: + fastpathTV.EncMapUint64Float32V(v, e) + case *map[uint64]float32: + 
fastpathTV.EncMapUint64Float32V(*v, e) + + case map[uint64]float64: + fastpathTV.EncMapUint64Float64V(v, e) + case *map[uint64]float64: + fastpathTV.EncMapUint64Float64V(*v, e) + + case map[uint64]bool: + fastpathTV.EncMapUint64BoolV(v, e) + case *map[uint64]bool: + fastpathTV.EncMapUint64BoolV(*v, e) + + case []uintptr: + fastpathTV.EncSliceUintptrV(v, e) + case *[]uintptr: + fastpathTV.EncSliceUintptrV(*v, e) + + case map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(v, e) + case *map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(*v, e) + + case map[uintptr]string: + fastpathTV.EncMapUintptrStringV(v, e) + case *map[uintptr]string: + fastpathTV.EncMapUintptrStringV(*v, e) + + case map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(v, e) + case *map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(*v, e) + + case map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(v, e) + case *map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(*v, e) + + case map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(v, e) + case *map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(*v, e) + + case map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(v, e) + case *map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(*v, e) + + case map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(v, e) + case *map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(*v, e) + + case map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(v, e) + case *map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(*v, e) + + case map[uintptr]int: + fastpathTV.EncMapUintptrIntV(v, e) + case *map[uintptr]int: + fastpathTV.EncMapUintptrIntV(*v, e) + + case map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(v, e) + case *map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(*v, e) + + case map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(v, e) + case *map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(*v, e) + + case map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(v, e) + case *map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(*v, e) + + case map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(v, e) + case *map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(*v, e) + + case map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(v, e) + case *map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(*v, e) + + case map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(v, e) + case *map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(*v, e) + + case map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(v, e) + case *map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(*v, e) + + case []int: + fastpathTV.EncSliceIntV(v, e) + case *[]int: + fastpathTV.EncSliceIntV(*v, e) + + case map[int]interface{}: + fastpathTV.EncMapIntIntfV(v, e) + case *map[int]interface{}: + fastpathTV.EncMapIntIntfV(*v, e) + + case map[int]string: + fastpathTV.EncMapIntStringV(v, e) + case *map[int]string: + fastpathTV.EncMapIntStringV(*v, e) + + case map[int]uint: + fastpathTV.EncMapIntUintV(v, e) + case *map[int]uint: + fastpathTV.EncMapIntUintV(*v, e) + + case map[int]uint8: + fastpathTV.EncMapIntUint8V(v, e) + case *map[int]uint8: + fastpathTV.EncMapIntUint8V(*v, e) + + case map[int]uint16: + fastpathTV.EncMapIntUint16V(v, e) + case *map[int]uint16: + fastpathTV.EncMapIntUint16V(*v, e) + + case map[int]uint32: + fastpathTV.EncMapIntUint32V(v, e) + case *map[int]uint32: + fastpathTV.EncMapIntUint32V(*v, e) + + case map[int]uint64: + fastpathTV.EncMapIntUint64V(v, e) + case *map[int]uint64: + fastpathTV.EncMapIntUint64V(*v, e) + + case 
map[int]uintptr: + fastpathTV.EncMapIntUintptrV(v, e) + case *map[int]uintptr: + fastpathTV.EncMapIntUintptrV(*v, e) + + case map[int]int: + fastpathTV.EncMapIntIntV(v, e) + case *map[int]int: + fastpathTV.EncMapIntIntV(*v, e) + + case map[int]int8: + fastpathTV.EncMapIntInt8V(v, e) + case *map[int]int8: + fastpathTV.EncMapIntInt8V(*v, e) + + case map[int]int16: + fastpathTV.EncMapIntInt16V(v, e) + case *map[int]int16: + fastpathTV.EncMapIntInt16V(*v, e) + + case map[int]int32: + fastpathTV.EncMapIntInt32V(v, e) + case *map[int]int32: + fastpathTV.EncMapIntInt32V(*v, e) + + case map[int]int64: + fastpathTV.EncMapIntInt64V(v, e) + case *map[int]int64: + fastpathTV.EncMapIntInt64V(*v, e) + + case map[int]float32: + fastpathTV.EncMapIntFloat32V(v, e) + case *map[int]float32: + fastpathTV.EncMapIntFloat32V(*v, e) + + case map[int]float64: + fastpathTV.EncMapIntFloat64V(v, e) + case *map[int]float64: + fastpathTV.EncMapIntFloat64V(*v, e) + + case map[int]bool: + fastpathTV.EncMapIntBoolV(v, e) + case *map[int]bool: + fastpathTV.EncMapIntBoolV(*v, e) + + case []int8: + fastpathTV.EncSliceInt8V(v, e) + case *[]int8: + fastpathTV.EncSliceInt8V(*v, e) + + case map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(v, e) + case *map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(*v, e) + + case map[int8]string: + fastpathTV.EncMapInt8StringV(v, e) + case *map[int8]string: + fastpathTV.EncMapInt8StringV(*v, e) + + case map[int8]uint: + fastpathTV.EncMapInt8UintV(v, e) + case *map[int8]uint: + fastpathTV.EncMapInt8UintV(*v, e) + + case map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(v, e) + case *map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(*v, e) + + case map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(v, e) + case *map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(*v, e) + + case map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(v, e) + case *map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(*v, e) + + case map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(v, e) + case *map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(*v, e) + + case map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(v, e) + case *map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(*v, e) + + case map[int8]int: + fastpathTV.EncMapInt8IntV(v, e) + case *map[int8]int: + fastpathTV.EncMapInt8IntV(*v, e) + + case map[int8]int8: + fastpathTV.EncMapInt8Int8V(v, e) + case *map[int8]int8: + fastpathTV.EncMapInt8Int8V(*v, e) + + case map[int8]int16: + fastpathTV.EncMapInt8Int16V(v, e) + case *map[int8]int16: + fastpathTV.EncMapInt8Int16V(*v, e) + + case map[int8]int32: + fastpathTV.EncMapInt8Int32V(v, e) + case *map[int8]int32: + fastpathTV.EncMapInt8Int32V(*v, e) + + case map[int8]int64: + fastpathTV.EncMapInt8Int64V(v, e) + case *map[int8]int64: + fastpathTV.EncMapInt8Int64V(*v, e) + + case map[int8]float32: + fastpathTV.EncMapInt8Float32V(v, e) + case *map[int8]float32: + fastpathTV.EncMapInt8Float32V(*v, e) + + case map[int8]float64: + fastpathTV.EncMapInt8Float64V(v, e) + case *map[int8]float64: + fastpathTV.EncMapInt8Float64V(*v, e) + + case map[int8]bool: + fastpathTV.EncMapInt8BoolV(v, e) + case *map[int8]bool: + fastpathTV.EncMapInt8BoolV(*v, e) + + case []int16: + fastpathTV.EncSliceInt16V(v, e) + case *[]int16: + fastpathTV.EncSliceInt16V(*v, e) + + case map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(v, e) + case *map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(*v, e) + + case map[int16]string: + fastpathTV.EncMapInt16StringV(v, e) + case *map[int16]string: + fastpathTV.EncMapInt16StringV(*v, e) + + case map[int16]uint: + 
fastpathTV.EncMapInt16UintV(v, e) + case *map[int16]uint: + fastpathTV.EncMapInt16UintV(*v, e) + + case map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(v, e) + case *map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(*v, e) + + case map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(v, e) + case *map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(*v, e) + + case map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(v, e) + case *map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(*v, e) + + case map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(v, e) + case *map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(*v, e) + + case map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(v, e) + case *map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(*v, e) + + case map[int16]int: + fastpathTV.EncMapInt16IntV(v, e) + case *map[int16]int: + fastpathTV.EncMapInt16IntV(*v, e) + + case map[int16]int8: + fastpathTV.EncMapInt16Int8V(v, e) + case *map[int16]int8: + fastpathTV.EncMapInt16Int8V(*v, e) + + case map[int16]int16: + fastpathTV.EncMapInt16Int16V(v, e) + case *map[int16]int16: + fastpathTV.EncMapInt16Int16V(*v, e) + + case map[int16]int32: + fastpathTV.EncMapInt16Int32V(v, e) + case *map[int16]int32: + fastpathTV.EncMapInt16Int32V(*v, e) + + case map[int16]int64: + fastpathTV.EncMapInt16Int64V(v, e) + case *map[int16]int64: + fastpathTV.EncMapInt16Int64V(*v, e) + + case map[int16]float32: + fastpathTV.EncMapInt16Float32V(v, e) + case *map[int16]float32: + fastpathTV.EncMapInt16Float32V(*v, e) + + case map[int16]float64: + fastpathTV.EncMapInt16Float64V(v, e) + case *map[int16]float64: + fastpathTV.EncMapInt16Float64V(*v, e) + + case map[int16]bool: + fastpathTV.EncMapInt16BoolV(v, e) + case *map[int16]bool: + fastpathTV.EncMapInt16BoolV(*v, e) + + case []int32: + fastpathTV.EncSliceInt32V(v, e) + case *[]int32: + fastpathTV.EncSliceInt32V(*v, e) + + case map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(v, e) + case *map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(*v, e) + + case map[int32]string: + fastpathTV.EncMapInt32StringV(v, e) + case *map[int32]string: + fastpathTV.EncMapInt32StringV(*v, e) + + case map[int32]uint: + fastpathTV.EncMapInt32UintV(v, e) + case *map[int32]uint: + fastpathTV.EncMapInt32UintV(*v, e) + + case map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(v, e) + case *map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(*v, e) + + case map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(v, e) + case *map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(*v, e) + + case map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(v, e) + case *map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(*v, e) + + case map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(v, e) + case *map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(*v, e) + + case map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(v, e) + case *map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(*v, e) + + case map[int32]int: + fastpathTV.EncMapInt32IntV(v, e) + case *map[int32]int: + fastpathTV.EncMapInt32IntV(*v, e) + + case map[int32]int8: + fastpathTV.EncMapInt32Int8V(v, e) + case *map[int32]int8: + fastpathTV.EncMapInt32Int8V(*v, e) + + case map[int32]int16: + fastpathTV.EncMapInt32Int16V(v, e) + case *map[int32]int16: + fastpathTV.EncMapInt32Int16V(*v, e) + + case map[int32]int32: + fastpathTV.EncMapInt32Int32V(v, e) + case *map[int32]int32: + fastpathTV.EncMapInt32Int32V(*v, e) + + case map[int32]int64: + fastpathTV.EncMapInt32Int64V(v, e) + case *map[int32]int64: + fastpathTV.EncMapInt32Int64V(*v, e) + + case map[int32]float32: + 
fastpathTV.EncMapInt32Float32V(v, e) + case *map[int32]float32: + fastpathTV.EncMapInt32Float32V(*v, e) + + case map[int32]float64: + fastpathTV.EncMapInt32Float64V(v, e) + case *map[int32]float64: + fastpathTV.EncMapInt32Float64V(*v, e) + + case map[int32]bool: + fastpathTV.EncMapInt32BoolV(v, e) + case *map[int32]bool: + fastpathTV.EncMapInt32BoolV(*v, e) + + case []int64: + fastpathTV.EncSliceInt64V(v, e) + case *[]int64: + fastpathTV.EncSliceInt64V(*v, e) + + case map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(v, e) + case *map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(*v, e) + + case map[int64]string: + fastpathTV.EncMapInt64StringV(v, e) + case *map[int64]string: + fastpathTV.EncMapInt64StringV(*v, e) + + case map[int64]uint: + fastpathTV.EncMapInt64UintV(v, e) + case *map[int64]uint: + fastpathTV.EncMapInt64UintV(*v, e) + + case map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(v, e) + case *map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(*v, e) + + case map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(v, e) + case *map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(*v, e) + + case map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(v, e) + case *map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(*v, e) + + case map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(v, e) + case *map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(*v, e) + + case map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(v, e) + case *map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(*v, e) + + case map[int64]int: + fastpathTV.EncMapInt64IntV(v, e) + case *map[int64]int: + fastpathTV.EncMapInt64IntV(*v, e) + + case map[int64]int8: + fastpathTV.EncMapInt64Int8V(v, e) + case *map[int64]int8: + fastpathTV.EncMapInt64Int8V(*v, e) + + case map[int64]int16: + fastpathTV.EncMapInt64Int16V(v, e) + case *map[int64]int16: + fastpathTV.EncMapInt64Int16V(*v, e) + + case map[int64]int32: + fastpathTV.EncMapInt64Int32V(v, e) + case *map[int64]int32: + fastpathTV.EncMapInt64Int32V(*v, e) + + case map[int64]int64: + fastpathTV.EncMapInt64Int64V(v, e) + case *map[int64]int64: + fastpathTV.EncMapInt64Int64V(*v, e) + + case map[int64]float32: + fastpathTV.EncMapInt64Float32V(v, e) + case *map[int64]float32: + fastpathTV.EncMapInt64Float32V(*v, e) + + case map[int64]float64: + fastpathTV.EncMapInt64Float64V(v, e) + case *map[int64]float64: + fastpathTV.EncMapInt64Float64V(*v, e) + + case map[int64]bool: + fastpathTV.EncMapInt64BoolV(v, e) + case *map[int64]bool: + fastpathTV.EncMapInt64BoolV(*v, e) + + case []bool: + fastpathTV.EncSliceBoolV(v, e) + case *[]bool: + fastpathTV.EncSliceBoolV(*v, e) + + case map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(v, e) + case *map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(*v, e) + + case map[bool]string: + fastpathTV.EncMapBoolStringV(v, e) + case *map[bool]string: + fastpathTV.EncMapBoolStringV(*v, e) + + case map[bool]uint: + fastpathTV.EncMapBoolUintV(v, e) + case *map[bool]uint: + fastpathTV.EncMapBoolUintV(*v, e) + + case map[bool]uint8: + fastpathTV.EncMapBoolUint8V(v, e) + case *map[bool]uint8: + fastpathTV.EncMapBoolUint8V(*v, e) + + case map[bool]uint16: + fastpathTV.EncMapBoolUint16V(v, e) + case *map[bool]uint16: + fastpathTV.EncMapBoolUint16V(*v, e) + + case map[bool]uint32: + fastpathTV.EncMapBoolUint32V(v, e) + case *map[bool]uint32: + fastpathTV.EncMapBoolUint32V(*v, e) + + case map[bool]uint64: + fastpathTV.EncMapBoolUint64V(v, e) + case *map[bool]uint64: + fastpathTV.EncMapBoolUint64V(*v, e) + + case map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(v, e) + 
case *map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(*v, e) + + case map[bool]int: + fastpathTV.EncMapBoolIntV(v, e) + case *map[bool]int: + fastpathTV.EncMapBoolIntV(*v, e) + + case map[bool]int8: + fastpathTV.EncMapBoolInt8V(v, e) + case *map[bool]int8: + fastpathTV.EncMapBoolInt8V(*v, e) + + case map[bool]int16: + fastpathTV.EncMapBoolInt16V(v, e) + case *map[bool]int16: + fastpathTV.EncMapBoolInt16V(*v, e) + + case map[bool]int32: + fastpathTV.EncMapBoolInt32V(v, e) + case *map[bool]int32: + fastpathTV.EncMapBoolInt32V(*v, e) + + case map[bool]int64: + fastpathTV.EncMapBoolInt64V(v, e) + case *map[bool]int64: + fastpathTV.EncMapBoolInt64V(*v, e) + + case map[bool]float32: + fastpathTV.EncMapBoolFloat32V(v, e) + case *map[bool]float32: + fastpathTV.EncMapBoolFloat32V(*v, e) + + case map[bool]float64: + fastpathTV.EncMapBoolFloat64V(v, e) + case *map[bool]float64: + fastpathTV.EncMapBoolFloat64V(*v, e) + + case map[bool]bool: + fastpathTV.EncMapBoolBoolV(v, e) + case *map[bool]bool: + fastpathTV.EncMapBoolBoolV(*v, e) + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + + case []interface{}: + fastpathTV.EncSliceIntfV(v, e) + case *[]interface{}: + fastpathTV.EncSliceIntfV(*v, e) + + case []string: + fastpathTV.EncSliceStringV(v, e) + case *[]string: + fastpathTV.EncSliceStringV(*v, e) + + case []float32: + fastpathTV.EncSliceFloat32V(v, e) + case *[]float32: + fastpathTV.EncSliceFloat32V(*v, e) + + case []float64: + fastpathTV.EncSliceFloat64V(v, e) + case *[]float64: + fastpathTV.EncSliceFloat64V(*v, e) + + case []uint: + fastpathTV.EncSliceUintV(v, e) + case *[]uint: + fastpathTV.EncSliceUintV(*v, e) + + case []uint16: + fastpathTV.EncSliceUint16V(v, e) + case *[]uint16: + fastpathTV.EncSliceUint16V(*v, e) + + case []uint32: + fastpathTV.EncSliceUint32V(v, e) + case *[]uint32: + fastpathTV.EncSliceUint32V(*v, e) + + case []uint64: + fastpathTV.EncSliceUint64V(v, e) + case *[]uint64: + fastpathTV.EncSliceUint64V(*v, e) + + case []uintptr: + fastpathTV.EncSliceUintptrV(v, e) + case *[]uintptr: + fastpathTV.EncSliceUintptrV(*v, e) + + case []int: + fastpathTV.EncSliceIntV(v, e) + case *[]int: + fastpathTV.EncSliceIntV(*v, e) + + case []int8: + fastpathTV.EncSliceInt8V(v, e) + case *[]int8: + fastpathTV.EncSliceInt8V(*v, e) + + case []int16: + fastpathTV.EncSliceInt16V(v, e) + case *[]int16: + fastpathTV.EncSliceInt16V(*v, e) + + case []int32: + fastpathTV.EncSliceInt32V(v, e) + case *[]int32: + fastpathTV.EncSliceInt32V(*v, e) + + case []int64: + fastpathTV.EncSliceInt64V(v, e) + case *[]int64: + fastpathTV.EncSliceInt64V(*v, e) + + case []bool: + fastpathTV.EncSliceBoolV(v, e) + case *[]bool: + fastpathTV.EncSliceBoolV(*v, e) + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + + case map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(v, e) + case *map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(*v, e) + + case map[interface{}]string: + fastpathTV.EncMapIntfStringV(v, e) + case *map[interface{}]string: + fastpathTV.EncMapIntfStringV(*v, e) + + case map[interface{}]uint: + fastpathTV.EncMapIntfUintV(v, e) + case *map[interface{}]uint: + fastpathTV.EncMapIntfUintV(*v, e) + + case 
map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(v, e) + case *map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(*v, e) + + case map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(v, e) + case *map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(*v, e) + + case map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(v, e) + case *map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(*v, e) + + case map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(v, e) + case *map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(*v, e) + + case map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(v, e) + case *map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(*v, e) + + case map[interface{}]int: + fastpathTV.EncMapIntfIntV(v, e) + case *map[interface{}]int: + fastpathTV.EncMapIntfIntV(*v, e) + + case map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(v, e) + case *map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(*v, e) + + case map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(v, e) + case *map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(*v, e) + + case map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(v, e) + case *map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(*v, e) + + case map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(v, e) + case *map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(*v, e) + + case map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(v, e) + case *map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(*v, e) + + case map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(v, e) + case *map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(*v, e) + + case map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(v, e) + case *map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(*v, e) + + case map[string]interface{}: + fastpathTV.EncMapStringIntfV(v, e) + case *map[string]interface{}: + fastpathTV.EncMapStringIntfV(*v, e) + + case map[string]string: + fastpathTV.EncMapStringStringV(v, e) + case *map[string]string: + fastpathTV.EncMapStringStringV(*v, e) + + case map[string]uint: + fastpathTV.EncMapStringUintV(v, e) + case *map[string]uint: + fastpathTV.EncMapStringUintV(*v, e) + + case map[string]uint8: + fastpathTV.EncMapStringUint8V(v, e) + case *map[string]uint8: + fastpathTV.EncMapStringUint8V(*v, e) + + case map[string]uint16: + fastpathTV.EncMapStringUint16V(v, e) + case *map[string]uint16: + fastpathTV.EncMapStringUint16V(*v, e) + + case map[string]uint32: + fastpathTV.EncMapStringUint32V(v, e) + case *map[string]uint32: + fastpathTV.EncMapStringUint32V(*v, e) + + case map[string]uint64: + fastpathTV.EncMapStringUint64V(v, e) + case *map[string]uint64: + fastpathTV.EncMapStringUint64V(*v, e) + + case map[string]uintptr: + fastpathTV.EncMapStringUintptrV(v, e) + case *map[string]uintptr: + fastpathTV.EncMapStringUintptrV(*v, e) + + case map[string]int: + fastpathTV.EncMapStringIntV(v, e) + case *map[string]int: + fastpathTV.EncMapStringIntV(*v, e) + + case map[string]int8: + fastpathTV.EncMapStringInt8V(v, e) + case *map[string]int8: + fastpathTV.EncMapStringInt8V(*v, e) + + case map[string]int16: + fastpathTV.EncMapStringInt16V(v, e) + case *map[string]int16: + fastpathTV.EncMapStringInt16V(*v, e) + + case map[string]int32: + fastpathTV.EncMapStringInt32V(v, e) + case *map[string]int32: + fastpathTV.EncMapStringInt32V(*v, e) + + case map[string]int64: + fastpathTV.EncMapStringInt64V(v, e) + case *map[string]int64: + fastpathTV.EncMapStringInt64V(*v, e) + + case map[string]float32: + fastpathTV.EncMapStringFloat32V(v, 
e) + case *map[string]float32: + fastpathTV.EncMapStringFloat32V(*v, e) + + case map[string]float64: + fastpathTV.EncMapStringFloat64V(v, e) + case *map[string]float64: + fastpathTV.EncMapStringFloat64V(*v, e) + + case map[string]bool: + fastpathTV.EncMapStringBoolV(v, e) + case *map[string]bool: + fastpathTV.EncMapStringBoolV(*v, e) + + case map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(v, e) + case *map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(*v, e) + + case map[float32]string: + fastpathTV.EncMapFloat32StringV(v, e) + case *map[float32]string: + fastpathTV.EncMapFloat32StringV(*v, e) + + case map[float32]uint: + fastpathTV.EncMapFloat32UintV(v, e) + case *map[float32]uint: + fastpathTV.EncMapFloat32UintV(*v, e) + + case map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(v, e) + case *map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(*v, e) + + case map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(v, e) + case *map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(*v, e) + + case map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(v, e) + case *map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(*v, e) + + case map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(v, e) + case *map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(*v, e) + + case map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(v, e) + case *map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(*v, e) + + case map[float32]int: + fastpathTV.EncMapFloat32IntV(v, e) + case *map[float32]int: + fastpathTV.EncMapFloat32IntV(*v, e) + + case map[float32]int8: + fastpathTV.EncMapFloat32Int8V(v, e) + case *map[float32]int8: + fastpathTV.EncMapFloat32Int8V(*v, e) + + case map[float32]int16: + fastpathTV.EncMapFloat32Int16V(v, e) + case *map[float32]int16: + fastpathTV.EncMapFloat32Int16V(*v, e) + + case map[float32]int32: + fastpathTV.EncMapFloat32Int32V(v, e) + case *map[float32]int32: + fastpathTV.EncMapFloat32Int32V(*v, e) + + case map[float32]int64: + fastpathTV.EncMapFloat32Int64V(v, e) + case *map[float32]int64: + fastpathTV.EncMapFloat32Int64V(*v, e) + + case map[float32]float32: + fastpathTV.EncMapFloat32Float32V(v, e) + case *map[float32]float32: + fastpathTV.EncMapFloat32Float32V(*v, e) + + case map[float32]float64: + fastpathTV.EncMapFloat32Float64V(v, e) + case *map[float32]float64: + fastpathTV.EncMapFloat32Float64V(*v, e) + + case map[float32]bool: + fastpathTV.EncMapFloat32BoolV(v, e) + case *map[float32]bool: + fastpathTV.EncMapFloat32BoolV(*v, e) + + case map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(v, e) + case *map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(*v, e) + + case map[float64]string: + fastpathTV.EncMapFloat64StringV(v, e) + case *map[float64]string: + fastpathTV.EncMapFloat64StringV(*v, e) + + case map[float64]uint: + fastpathTV.EncMapFloat64UintV(v, e) + case *map[float64]uint: + fastpathTV.EncMapFloat64UintV(*v, e) + + case map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(v, e) + case *map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(*v, e) + + case map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(v, e) + case *map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(*v, e) + + case map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(v, e) + case *map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(*v, e) + + case map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(v, e) + case *map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(*v, e) + + case map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(v, e) + case 
*map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(*v, e) + + case map[float64]int: + fastpathTV.EncMapFloat64IntV(v, e) + case *map[float64]int: + fastpathTV.EncMapFloat64IntV(*v, e) + + case map[float64]int8: + fastpathTV.EncMapFloat64Int8V(v, e) + case *map[float64]int8: + fastpathTV.EncMapFloat64Int8V(*v, e) + + case map[float64]int16: + fastpathTV.EncMapFloat64Int16V(v, e) + case *map[float64]int16: + fastpathTV.EncMapFloat64Int16V(*v, e) + + case map[float64]int32: + fastpathTV.EncMapFloat64Int32V(v, e) + case *map[float64]int32: + fastpathTV.EncMapFloat64Int32V(*v, e) + + case map[float64]int64: + fastpathTV.EncMapFloat64Int64V(v, e) + case *map[float64]int64: + fastpathTV.EncMapFloat64Int64V(*v, e) + + case map[float64]float32: + fastpathTV.EncMapFloat64Float32V(v, e) + case *map[float64]float32: + fastpathTV.EncMapFloat64Float32V(*v, e) + + case map[float64]float64: + fastpathTV.EncMapFloat64Float64V(v, e) + case *map[float64]float64: + fastpathTV.EncMapFloat64Float64V(*v, e) + + case map[float64]bool: + fastpathTV.EncMapFloat64BoolV(v, e) + case *map[float64]bool: + fastpathTV.EncMapFloat64BoolV(*v, e) + + case map[uint]interface{}: + fastpathTV.EncMapUintIntfV(v, e) + case *map[uint]interface{}: + fastpathTV.EncMapUintIntfV(*v, e) + + case map[uint]string: + fastpathTV.EncMapUintStringV(v, e) + case *map[uint]string: + fastpathTV.EncMapUintStringV(*v, e) + + case map[uint]uint: + fastpathTV.EncMapUintUintV(v, e) + case *map[uint]uint: + fastpathTV.EncMapUintUintV(*v, e) + + case map[uint]uint8: + fastpathTV.EncMapUintUint8V(v, e) + case *map[uint]uint8: + fastpathTV.EncMapUintUint8V(*v, e) + + case map[uint]uint16: + fastpathTV.EncMapUintUint16V(v, e) + case *map[uint]uint16: + fastpathTV.EncMapUintUint16V(*v, e) + + case map[uint]uint32: + fastpathTV.EncMapUintUint32V(v, e) + case *map[uint]uint32: + fastpathTV.EncMapUintUint32V(*v, e) + + case map[uint]uint64: + fastpathTV.EncMapUintUint64V(v, e) + case *map[uint]uint64: + fastpathTV.EncMapUintUint64V(*v, e) + + case map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(v, e) + case *map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(*v, e) + + case map[uint]int: + fastpathTV.EncMapUintIntV(v, e) + case *map[uint]int: + fastpathTV.EncMapUintIntV(*v, e) + + case map[uint]int8: + fastpathTV.EncMapUintInt8V(v, e) + case *map[uint]int8: + fastpathTV.EncMapUintInt8V(*v, e) + + case map[uint]int16: + fastpathTV.EncMapUintInt16V(v, e) + case *map[uint]int16: + fastpathTV.EncMapUintInt16V(*v, e) + + case map[uint]int32: + fastpathTV.EncMapUintInt32V(v, e) + case *map[uint]int32: + fastpathTV.EncMapUintInt32V(*v, e) + + case map[uint]int64: + fastpathTV.EncMapUintInt64V(v, e) + case *map[uint]int64: + fastpathTV.EncMapUintInt64V(*v, e) + + case map[uint]float32: + fastpathTV.EncMapUintFloat32V(v, e) + case *map[uint]float32: + fastpathTV.EncMapUintFloat32V(*v, e) + + case map[uint]float64: + fastpathTV.EncMapUintFloat64V(v, e) + case *map[uint]float64: + fastpathTV.EncMapUintFloat64V(*v, e) + + case map[uint]bool: + fastpathTV.EncMapUintBoolV(v, e) + case *map[uint]bool: + fastpathTV.EncMapUintBoolV(*v, e) + + case map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(v, e) + case *map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(*v, e) + + case map[uint8]string: + fastpathTV.EncMapUint8StringV(v, e) + case *map[uint8]string: + fastpathTV.EncMapUint8StringV(*v, e) + + case map[uint8]uint: + fastpathTV.EncMapUint8UintV(v, e) + case *map[uint8]uint: + fastpathTV.EncMapUint8UintV(*v, e) + + case map[uint8]uint8: + 
fastpathTV.EncMapUint8Uint8V(v, e) + case *map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(*v, e) + + case map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(v, e) + case *map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(*v, e) + + case map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(v, e) + case *map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(*v, e) + + case map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(v, e) + case *map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(*v, e) + + case map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(v, e) + case *map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(*v, e) + + case map[uint8]int: + fastpathTV.EncMapUint8IntV(v, e) + case *map[uint8]int: + fastpathTV.EncMapUint8IntV(*v, e) + + case map[uint8]int8: + fastpathTV.EncMapUint8Int8V(v, e) + case *map[uint8]int8: + fastpathTV.EncMapUint8Int8V(*v, e) + + case map[uint8]int16: + fastpathTV.EncMapUint8Int16V(v, e) + case *map[uint8]int16: + fastpathTV.EncMapUint8Int16V(*v, e) + + case map[uint8]int32: + fastpathTV.EncMapUint8Int32V(v, e) + case *map[uint8]int32: + fastpathTV.EncMapUint8Int32V(*v, e) + + case map[uint8]int64: + fastpathTV.EncMapUint8Int64V(v, e) + case *map[uint8]int64: + fastpathTV.EncMapUint8Int64V(*v, e) + + case map[uint8]float32: + fastpathTV.EncMapUint8Float32V(v, e) + case *map[uint8]float32: + fastpathTV.EncMapUint8Float32V(*v, e) + + case map[uint8]float64: + fastpathTV.EncMapUint8Float64V(v, e) + case *map[uint8]float64: + fastpathTV.EncMapUint8Float64V(*v, e) + + case map[uint8]bool: + fastpathTV.EncMapUint8BoolV(v, e) + case *map[uint8]bool: + fastpathTV.EncMapUint8BoolV(*v, e) + + case map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(v, e) + case *map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(*v, e) + + case map[uint16]string: + fastpathTV.EncMapUint16StringV(v, e) + case *map[uint16]string: + fastpathTV.EncMapUint16StringV(*v, e) + + case map[uint16]uint: + fastpathTV.EncMapUint16UintV(v, e) + case *map[uint16]uint: + fastpathTV.EncMapUint16UintV(*v, e) + + case map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(v, e) + case *map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(*v, e) + + case map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(v, e) + case *map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(*v, e) + + case map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(v, e) + case *map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(*v, e) + + case map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(v, e) + case *map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(*v, e) + + case map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(v, e) + case *map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(*v, e) + + case map[uint16]int: + fastpathTV.EncMapUint16IntV(v, e) + case *map[uint16]int: + fastpathTV.EncMapUint16IntV(*v, e) + + case map[uint16]int8: + fastpathTV.EncMapUint16Int8V(v, e) + case *map[uint16]int8: + fastpathTV.EncMapUint16Int8V(*v, e) + + case map[uint16]int16: + fastpathTV.EncMapUint16Int16V(v, e) + case *map[uint16]int16: + fastpathTV.EncMapUint16Int16V(*v, e) + + case map[uint16]int32: + fastpathTV.EncMapUint16Int32V(v, e) + case *map[uint16]int32: + fastpathTV.EncMapUint16Int32V(*v, e) + + case map[uint16]int64: + fastpathTV.EncMapUint16Int64V(v, e) + case *map[uint16]int64: + fastpathTV.EncMapUint16Int64V(*v, e) + + case map[uint16]float32: + fastpathTV.EncMapUint16Float32V(v, e) + case *map[uint16]float32: + fastpathTV.EncMapUint16Float32V(*v, e) + + case map[uint16]float64: + fastpathTV.EncMapUint16Float64V(v, e) + case 
*map[uint16]float64: + fastpathTV.EncMapUint16Float64V(*v, e) + + case map[uint16]bool: + fastpathTV.EncMapUint16BoolV(v, e) + case *map[uint16]bool: + fastpathTV.EncMapUint16BoolV(*v, e) + + case map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(v, e) + case *map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(*v, e) + + case map[uint32]string: + fastpathTV.EncMapUint32StringV(v, e) + case *map[uint32]string: + fastpathTV.EncMapUint32StringV(*v, e) + + case map[uint32]uint: + fastpathTV.EncMapUint32UintV(v, e) + case *map[uint32]uint: + fastpathTV.EncMapUint32UintV(*v, e) + + case map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(v, e) + case *map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(*v, e) + + case map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(v, e) + case *map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(*v, e) + + case map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(v, e) + case *map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(*v, e) + + case map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(v, e) + case *map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(*v, e) + + case map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(v, e) + case *map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(*v, e) + + case map[uint32]int: + fastpathTV.EncMapUint32IntV(v, e) + case *map[uint32]int: + fastpathTV.EncMapUint32IntV(*v, e) + + case map[uint32]int8: + fastpathTV.EncMapUint32Int8V(v, e) + case *map[uint32]int8: + fastpathTV.EncMapUint32Int8V(*v, e) + + case map[uint32]int16: + fastpathTV.EncMapUint32Int16V(v, e) + case *map[uint32]int16: + fastpathTV.EncMapUint32Int16V(*v, e) + + case map[uint32]int32: + fastpathTV.EncMapUint32Int32V(v, e) + case *map[uint32]int32: + fastpathTV.EncMapUint32Int32V(*v, e) + + case map[uint32]int64: + fastpathTV.EncMapUint32Int64V(v, e) + case *map[uint32]int64: + fastpathTV.EncMapUint32Int64V(*v, e) + + case map[uint32]float32: + fastpathTV.EncMapUint32Float32V(v, e) + case *map[uint32]float32: + fastpathTV.EncMapUint32Float32V(*v, e) + + case map[uint32]float64: + fastpathTV.EncMapUint32Float64V(v, e) + case *map[uint32]float64: + fastpathTV.EncMapUint32Float64V(*v, e) + + case map[uint32]bool: + fastpathTV.EncMapUint32BoolV(v, e) + case *map[uint32]bool: + fastpathTV.EncMapUint32BoolV(*v, e) + + case map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(v, e) + case *map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(*v, e) + + case map[uint64]string: + fastpathTV.EncMapUint64StringV(v, e) + case *map[uint64]string: + fastpathTV.EncMapUint64StringV(*v, e) + + case map[uint64]uint: + fastpathTV.EncMapUint64UintV(v, e) + case *map[uint64]uint: + fastpathTV.EncMapUint64UintV(*v, e) + + case map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(v, e) + case *map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(*v, e) + + case map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(v, e) + case *map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(*v, e) + + case map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(v, e) + case *map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(*v, e) + + case map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(v, e) + case *map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(*v, e) + + case map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(v, e) + case *map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(*v, e) + + case map[uint64]int: + fastpathTV.EncMapUint64IntV(v, e) + case *map[uint64]int: + fastpathTV.EncMapUint64IntV(*v, e) + + case map[uint64]int8: + fastpathTV.EncMapUint64Int8V(v, e) + 
case *map[uint64]int8: + fastpathTV.EncMapUint64Int8V(*v, e) + + case map[uint64]int16: + fastpathTV.EncMapUint64Int16V(v, e) + case *map[uint64]int16: + fastpathTV.EncMapUint64Int16V(*v, e) + + case map[uint64]int32: + fastpathTV.EncMapUint64Int32V(v, e) + case *map[uint64]int32: + fastpathTV.EncMapUint64Int32V(*v, e) + + case map[uint64]int64: + fastpathTV.EncMapUint64Int64V(v, e) + case *map[uint64]int64: + fastpathTV.EncMapUint64Int64V(*v, e) + + case map[uint64]float32: + fastpathTV.EncMapUint64Float32V(v, e) + case *map[uint64]float32: + fastpathTV.EncMapUint64Float32V(*v, e) + + case map[uint64]float64: + fastpathTV.EncMapUint64Float64V(v, e) + case *map[uint64]float64: + fastpathTV.EncMapUint64Float64V(*v, e) + + case map[uint64]bool: + fastpathTV.EncMapUint64BoolV(v, e) + case *map[uint64]bool: + fastpathTV.EncMapUint64BoolV(*v, e) + + case map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(v, e) + case *map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(*v, e) + + case map[uintptr]string: + fastpathTV.EncMapUintptrStringV(v, e) + case *map[uintptr]string: + fastpathTV.EncMapUintptrStringV(*v, e) + + case map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(v, e) + case *map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(*v, e) + + case map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(v, e) + case *map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(*v, e) + + case map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(v, e) + case *map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(*v, e) + + case map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(v, e) + case *map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(*v, e) + + case map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(v, e) + case *map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(*v, e) + + case map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(v, e) + case *map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(*v, e) + + case map[uintptr]int: + fastpathTV.EncMapUintptrIntV(v, e) + case *map[uintptr]int: + fastpathTV.EncMapUintptrIntV(*v, e) + + case map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(v, e) + case *map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(*v, e) + + case map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(v, e) + case *map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(*v, e) + + case map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(v, e) + case *map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(*v, e) + + case map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(v, e) + case *map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(*v, e) + + case map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(v, e) + case *map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(*v, e) + + case map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(v, e) + case *map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(*v, e) + + case map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(v, e) + case *map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(*v, e) + + case map[int]interface{}: + fastpathTV.EncMapIntIntfV(v, e) + case *map[int]interface{}: + fastpathTV.EncMapIntIntfV(*v, e) + + case map[int]string: + fastpathTV.EncMapIntStringV(v, e) + case *map[int]string: + fastpathTV.EncMapIntStringV(*v, e) + + case map[int]uint: + fastpathTV.EncMapIntUintV(v, e) + case *map[int]uint: + fastpathTV.EncMapIntUintV(*v, e) + + case map[int]uint8: + fastpathTV.EncMapIntUint8V(v, e) + case *map[int]uint8: + fastpathTV.EncMapIntUint8V(*v, e) + + case map[int]uint16: + 
fastpathTV.EncMapIntUint16V(v, e) + case *map[int]uint16: + fastpathTV.EncMapIntUint16V(*v, e) + + case map[int]uint32: + fastpathTV.EncMapIntUint32V(v, e) + case *map[int]uint32: + fastpathTV.EncMapIntUint32V(*v, e) + + case map[int]uint64: + fastpathTV.EncMapIntUint64V(v, e) + case *map[int]uint64: + fastpathTV.EncMapIntUint64V(*v, e) + + case map[int]uintptr: + fastpathTV.EncMapIntUintptrV(v, e) + case *map[int]uintptr: + fastpathTV.EncMapIntUintptrV(*v, e) + + case map[int]int: + fastpathTV.EncMapIntIntV(v, e) + case *map[int]int: + fastpathTV.EncMapIntIntV(*v, e) + + case map[int]int8: + fastpathTV.EncMapIntInt8V(v, e) + case *map[int]int8: + fastpathTV.EncMapIntInt8V(*v, e) + + case map[int]int16: + fastpathTV.EncMapIntInt16V(v, e) + case *map[int]int16: + fastpathTV.EncMapIntInt16V(*v, e) + + case map[int]int32: + fastpathTV.EncMapIntInt32V(v, e) + case *map[int]int32: + fastpathTV.EncMapIntInt32V(*v, e) + + case map[int]int64: + fastpathTV.EncMapIntInt64V(v, e) + case *map[int]int64: + fastpathTV.EncMapIntInt64V(*v, e) + + case map[int]float32: + fastpathTV.EncMapIntFloat32V(v, e) + case *map[int]float32: + fastpathTV.EncMapIntFloat32V(*v, e) + + case map[int]float64: + fastpathTV.EncMapIntFloat64V(v, e) + case *map[int]float64: + fastpathTV.EncMapIntFloat64V(*v, e) + + case map[int]bool: + fastpathTV.EncMapIntBoolV(v, e) + case *map[int]bool: + fastpathTV.EncMapIntBoolV(*v, e) + + case map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(v, e) + case *map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(*v, e) + + case map[int8]string: + fastpathTV.EncMapInt8StringV(v, e) + case *map[int8]string: + fastpathTV.EncMapInt8StringV(*v, e) + + case map[int8]uint: + fastpathTV.EncMapInt8UintV(v, e) + case *map[int8]uint: + fastpathTV.EncMapInt8UintV(*v, e) + + case map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(v, e) + case *map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(*v, e) + + case map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(v, e) + case *map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(*v, e) + + case map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(v, e) + case *map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(*v, e) + + case map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(v, e) + case *map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(*v, e) + + case map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(v, e) + case *map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(*v, e) + + case map[int8]int: + fastpathTV.EncMapInt8IntV(v, e) + case *map[int8]int: + fastpathTV.EncMapInt8IntV(*v, e) + + case map[int8]int8: + fastpathTV.EncMapInt8Int8V(v, e) + case *map[int8]int8: + fastpathTV.EncMapInt8Int8V(*v, e) + + case map[int8]int16: + fastpathTV.EncMapInt8Int16V(v, e) + case *map[int8]int16: + fastpathTV.EncMapInt8Int16V(*v, e) + + case map[int8]int32: + fastpathTV.EncMapInt8Int32V(v, e) + case *map[int8]int32: + fastpathTV.EncMapInt8Int32V(*v, e) + + case map[int8]int64: + fastpathTV.EncMapInt8Int64V(v, e) + case *map[int8]int64: + fastpathTV.EncMapInt8Int64V(*v, e) + + case map[int8]float32: + fastpathTV.EncMapInt8Float32V(v, e) + case *map[int8]float32: + fastpathTV.EncMapInt8Float32V(*v, e) + + case map[int8]float64: + fastpathTV.EncMapInt8Float64V(v, e) + case *map[int8]float64: + fastpathTV.EncMapInt8Float64V(*v, e) + + case map[int8]bool: + fastpathTV.EncMapInt8BoolV(v, e) + case *map[int8]bool: + fastpathTV.EncMapInt8BoolV(*v, e) + + case map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(v, e) + case *map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(*v, e) + + case map[int16]string: 
+ fastpathTV.EncMapInt16StringV(v, e) + case *map[int16]string: + fastpathTV.EncMapInt16StringV(*v, e) + + case map[int16]uint: + fastpathTV.EncMapInt16UintV(v, e) + case *map[int16]uint: + fastpathTV.EncMapInt16UintV(*v, e) + + case map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(v, e) + case *map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(*v, e) + + case map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(v, e) + case *map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(*v, e) + + case map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(v, e) + case *map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(*v, e) + + case map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(v, e) + case *map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(*v, e) + + case map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(v, e) + case *map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(*v, e) + + case map[int16]int: + fastpathTV.EncMapInt16IntV(v, e) + case *map[int16]int: + fastpathTV.EncMapInt16IntV(*v, e) + + case map[int16]int8: + fastpathTV.EncMapInt16Int8V(v, e) + case *map[int16]int8: + fastpathTV.EncMapInt16Int8V(*v, e) + + case map[int16]int16: + fastpathTV.EncMapInt16Int16V(v, e) + case *map[int16]int16: + fastpathTV.EncMapInt16Int16V(*v, e) + + case map[int16]int32: + fastpathTV.EncMapInt16Int32V(v, e) + case *map[int16]int32: + fastpathTV.EncMapInt16Int32V(*v, e) + + case map[int16]int64: + fastpathTV.EncMapInt16Int64V(v, e) + case *map[int16]int64: + fastpathTV.EncMapInt16Int64V(*v, e) + + case map[int16]float32: + fastpathTV.EncMapInt16Float32V(v, e) + case *map[int16]float32: + fastpathTV.EncMapInt16Float32V(*v, e) + + case map[int16]float64: + fastpathTV.EncMapInt16Float64V(v, e) + case *map[int16]float64: + fastpathTV.EncMapInt16Float64V(*v, e) + + case map[int16]bool: + fastpathTV.EncMapInt16BoolV(v, e) + case *map[int16]bool: + fastpathTV.EncMapInt16BoolV(*v, e) + + case map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(v, e) + case *map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(*v, e) + + case map[int32]string: + fastpathTV.EncMapInt32StringV(v, e) + case *map[int32]string: + fastpathTV.EncMapInt32StringV(*v, e) + + case map[int32]uint: + fastpathTV.EncMapInt32UintV(v, e) + case *map[int32]uint: + fastpathTV.EncMapInt32UintV(*v, e) + + case map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(v, e) + case *map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(*v, e) + + case map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(v, e) + case *map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(*v, e) + + case map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(v, e) + case *map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(*v, e) + + case map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(v, e) + case *map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(*v, e) + + case map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(v, e) + case *map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(*v, e) + + case map[int32]int: + fastpathTV.EncMapInt32IntV(v, e) + case *map[int32]int: + fastpathTV.EncMapInt32IntV(*v, e) + + case map[int32]int8: + fastpathTV.EncMapInt32Int8V(v, e) + case *map[int32]int8: + fastpathTV.EncMapInt32Int8V(*v, e) + + case map[int32]int16: + fastpathTV.EncMapInt32Int16V(v, e) + case *map[int32]int16: + fastpathTV.EncMapInt32Int16V(*v, e) + + case map[int32]int32: + fastpathTV.EncMapInt32Int32V(v, e) + case *map[int32]int32: + fastpathTV.EncMapInt32Int32V(*v, e) + + case map[int32]int64: + fastpathTV.EncMapInt32Int64V(v, e) + case *map[int32]int64: + fastpathTV.EncMapInt32Int64V(*v, e) + + case 
map[int32]float32: + fastpathTV.EncMapInt32Float32V(v, e) + case *map[int32]float32: + fastpathTV.EncMapInt32Float32V(*v, e) + + case map[int32]float64: + fastpathTV.EncMapInt32Float64V(v, e) + case *map[int32]float64: + fastpathTV.EncMapInt32Float64V(*v, e) + + case map[int32]bool: + fastpathTV.EncMapInt32BoolV(v, e) + case *map[int32]bool: + fastpathTV.EncMapInt32BoolV(*v, e) + + case map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(v, e) + case *map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(*v, e) + + case map[int64]string: + fastpathTV.EncMapInt64StringV(v, e) + case *map[int64]string: + fastpathTV.EncMapInt64StringV(*v, e) + + case map[int64]uint: + fastpathTV.EncMapInt64UintV(v, e) + case *map[int64]uint: + fastpathTV.EncMapInt64UintV(*v, e) + + case map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(v, e) + case *map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(*v, e) + + case map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(v, e) + case *map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(*v, e) + + case map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(v, e) + case *map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(*v, e) + + case map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(v, e) + case *map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(*v, e) + + case map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(v, e) + case *map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(*v, e) + + case map[int64]int: + fastpathTV.EncMapInt64IntV(v, e) + case *map[int64]int: + fastpathTV.EncMapInt64IntV(*v, e) + + case map[int64]int8: + fastpathTV.EncMapInt64Int8V(v, e) + case *map[int64]int8: + fastpathTV.EncMapInt64Int8V(*v, e) + + case map[int64]int16: + fastpathTV.EncMapInt64Int16V(v, e) + case *map[int64]int16: + fastpathTV.EncMapInt64Int16V(*v, e) + + case map[int64]int32: + fastpathTV.EncMapInt64Int32V(v, e) + case *map[int64]int32: + fastpathTV.EncMapInt64Int32V(*v, e) + + case map[int64]int64: + fastpathTV.EncMapInt64Int64V(v, e) + case *map[int64]int64: + fastpathTV.EncMapInt64Int64V(*v, e) + + case map[int64]float32: + fastpathTV.EncMapInt64Float32V(v, e) + case *map[int64]float32: + fastpathTV.EncMapInt64Float32V(*v, e) + + case map[int64]float64: + fastpathTV.EncMapInt64Float64V(v, e) + case *map[int64]float64: + fastpathTV.EncMapInt64Float64V(*v, e) + + case map[int64]bool: + fastpathTV.EncMapInt64BoolV(v, e) + case *map[int64]bool: + fastpathTV.EncMapInt64BoolV(*v, e) + + case map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(v, e) + case *map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(*v, e) + + case map[bool]string: + fastpathTV.EncMapBoolStringV(v, e) + case *map[bool]string: + fastpathTV.EncMapBoolStringV(*v, e) + + case map[bool]uint: + fastpathTV.EncMapBoolUintV(v, e) + case *map[bool]uint: + fastpathTV.EncMapBoolUintV(*v, e) + + case map[bool]uint8: + fastpathTV.EncMapBoolUint8V(v, e) + case *map[bool]uint8: + fastpathTV.EncMapBoolUint8V(*v, e) + + case map[bool]uint16: + fastpathTV.EncMapBoolUint16V(v, e) + case *map[bool]uint16: + fastpathTV.EncMapBoolUint16V(*v, e) + + case map[bool]uint32: + fastpathTV.EncMapBoolUint32V(v, e) + case *map[bool]uint32: + fastpathTV.EncMapBoolUint32V(*v, e) + + case map[bool]uint64: + fastpathTV.EncMapBoolUint64V(v, e) + case *map[bool]uint64: + fastpathTV.EncMapBoolUint64V(*v, e) + + case map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(v, e) + case *map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(*v, e) + + case map[bool]int: + fastpathTV.EncMapBoolIntV(v, e) + case *map[bool]int: + fastpathTV.EncMapBoolIntV(*v, e) + + 
case map[bool]int8: + fastpathTV.EncMapBoolInt8V(v, e) + case *map[bool]int8: + fastpathTV.EncMapBoolInt8V(*v, e) + + case map[bool]int16: + fastpathTV.EncMapBoolInt16V(v, e) + case *map[bool]int16: + fastpathTV.EncMapBoolInt16V(*v, e) + + case map[bool]int32: + fastpathTV.EncMapBoolInt32V(v, e) + case *map[bool]int32: + fastpathTV.EncMapBoolInt32V(*v, e) + + case map[bool]int64: + fastpathTV.EncMapBoolInt64V(v, e) + case *map[bool]int64: + fastpathTV.EncMapBoolInt64V(*v, e) + + case map[bool]float32: + fastpathTV.EncMapBoolFloat32V(v, e) + case *map[bool]float32: + fastpathTV.EncMapBoolFloat32V(*v, e) + + case map[bool]float64: + fastpathTV.EncMapBoolFloat64V(v, e) + case *map[bool]float64: + fastpathTV.EncMapBoolFloat64V(*v, e) + + case map[bool]bool: + fastpathTV.EncMapBoolBoolV(v, e) + case *map[bool]bool: + fastpathTV.EncMapBoolBoolV(*v, e) + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions + +func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntfV(rv2i(rv).([]interface{}), e) + } else { + fastpathTV.EncSliceIntfV(rv2i(rv).([]interface{}), e) + } +} +func (_ fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + e.encode(v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + e.encode(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceStringV(rv2i(rv).([]string), e) + } else { + fastpathTV.EncSliceStringV(rv2i(rv).([]string), e) + } +} +func (_ fastpathT) EncSliceStringV(v []string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeString(c_UTF8, v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeString(c_UTF8, v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat32V(rv2i(rv).([]float32), e) + } else { + fastpathTV.EncSliceFloat32V(rv2i(rv).([]float32), e) + } +} +func (_ fastpathT) EncSliceFloat32V(v []float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeFloat32(v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + 
ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeFloat32(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat64V(rv2i(rv).([]float64), e) + } else { + fastpathTV.EncSliceFloat64V(rv2i(rv).([]float64), e) + } +} +func (_ fastpathT) EncSliceFloat64V(v []float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeFloat64(v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeFloat64(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUintR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintV(rv2i(rv).([]uint), e) + } else { + fastpathTV.EncSliceUintV(rv2i(rv).([]uint), e) + } +} +func (_ fastpathT) EncSliceUintV(v []uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceUintV(v []uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint16R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint16V(rv2i(rv).([]uint16), e) + } else { + fastpathTV.EncSliceUint16V(rv2i(rv).([]uint16), e) + } +} +func (_ fastpathT) EncSliceUint16V(v []uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint32V(rv2i(rv).([]uint32), e) + } else { + fastpathTV.EncSliceUint32V(rv2i(rv).([]uint32), e) + } +} +func (_ fastpathT) EncSliceUint32V(v []uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice 
length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint64V(rv2i(rv).([]uint64), e) + } else { + fastpathTV.EncSliceUint64V(rv2i(rv).([]uint64), e) + } +} +func (_ fastpathT) EncSliceUint64V(v []uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUintptrR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintptrV(rv2i(rv).([]uintptr), e) + } else { + fastpathTV.EncSliceUintptrV(rv2i(rv).([]uintptr), e) + } +} +func (_ fastpathT) EncSliceUintptrV(v []uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + e.encode(v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + e.encode(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntV(rv2i(rv).([]int), e) + } else { + fastpathTV.EncSliceIntV(rv2i(rv).([]int), e) + } +} +func (_ fastpathT) EncSliceIntV(v []int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt8R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt8V(rv2i(rv).([]int8), e) + } else { + fastpathTV.EncSliceInt8V(rv2i(rv).([]int8), e) + } +} +func (_ fastpathT) EncSliceInt8V(v []int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceInt8V(v []int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice 
length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt16R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt16V(rv2i(rv).([]int16), e) + } else { + fastpathTV.EncSliceInt16V(rv2i(rv).([]int16), e) + } +} +func (_ fastpathT) EncSliceInt16V(v []int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceInt16V(v []int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt32V(rv2i(rv).([]int32), e) + } else { + fastpathTV.EncSliceInt32V(rv2i(rv).([]int32), e) + } +} +func (_ fastpathT) EncSliceInt32V(v []int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt64V(rv2i(rv).([]int64), e) + } else { + fastpathTV.EncSliceInt64V(rv2i(rv).([]int64), e) + } +} +func (_ fastpathT) EncSliceInt64V(v []int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceBoolV(rv2i(rv).([]bool), e) + } else { + fastpathTV.EncSliceBoolV(rv2i(rv).([]bool), e) + } +} +func (_ fastpathT) EncSliceBoolV(v []bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeBool(v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice 
length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeBool(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), e) +} +func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfStringV(rv2i(rv).(map[interface{}]string), e) +} +func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUintV(rv2i(rv).(map[interface{}]uint), e) +} +func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), e) +} +func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = 
make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), e) +} +func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), e) +} +func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), e) +} +func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + 
ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), e) +} +func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfIntV(rv2i(rv).(map[interface{}]int), e) +} +func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt8V(rv2i(rv).(map[interface{}]int8), e) +} +func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt16V(rv2i(rv).(map[interface{}]int16), e) +} +func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := 
range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt32V(rv2i(rv).(map[interface{}]int32), e) +} +func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt64V(rv2i(rv).(map[interface{}]int64), e) +} +func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), e) +} +func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), e) +} +func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfBoolV(rv2i(rv).(map[interface{}]bool), e) +} +func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() 
+ } + ee.EncodeString(c_UTF8, v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintV(rv2i(rv).(map[string]uint), e) +} +func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint16V(rv2i(rv).(map[string]uint16), e) +} +func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint32V(rv2i(rv).(map[string]uint32), e) +} +func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) { + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintptrV(rv2i(rv).(map[string]uintptr), e) +} +func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + 
ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt8V(rv2i(rv).(map[string]int8), e) +} +func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt16V(rv2i(rv).(map[string]int16), e) +} +func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt64V(rv2i(rv).(map[string]int64), e) +} +func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, 
_ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat32V(rv2i(rv).(map[string]float32), e) +} +func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) 
fastpathEncMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), e) +} +func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32StringV(rv2i(rv).(map[float32]string), e) +} +func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintV(rv2i(rv).(map[float32]uint), e) +} +func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), e) +} +func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), e) +} +func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) { + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), e) +} +func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), e) +} +func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), e) +} +func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntV(rv2i(rv).(map[float32]int), e) +} +func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + 
ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int8V(rv2i(rv).(map[float32]int8), e) +} +func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int16V(rv2i(rv).(map[float32]int16), e) +} +func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int32V(rv2i(rv).(map[float32]int32), e) +} +func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int64V(rv2i(rv).(map[float32]int64), e) +} +func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + 
ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float32V(rv2i(rv).(map[float32]float32), e) +} +func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float64V(rv2i(rv).(map[float32]float64), e) +} +func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32BoolV(rv2i(rv).(map[float32]bool), e) +} +func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), e) +} +func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64StringV(rv2i(rv).(map[float64]string), e) +} +func (_ fastpathT) 
EncMapFloat64StringV(v map[float64]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintV(rv2i(rv).(map[float64]uint), e) +} +func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), e) +} +func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), e) +} +func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), e) +} +func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = 
float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), e) +} +func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), e) +} +func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntV(rv2i(rv).(map[float64]int), e) +} +func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int8V(rv2i(rv).(map[float64]int8), e) +} +func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if 
esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int16V(rv2i(rv).(map[float64]int16), e) +} +func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int32V(rv2i(rv).(map[float64]int32), e) +} +func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int64V(rv2i(rv).(map[float64]int64), e) +} +func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float32V(rv2i(rv).(map[float64]float32), e) +} +func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapFloat64Float64V(rv2i(rv).(map[float64]float64), e) +} +func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64BoolV(rv2i(rv).(map[float64]bool), e) +} +func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntfV(rv2i(rv).(map[uint]interface{}), e) +} +func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintStringV(rv2i(rv).(map[uint]string), e) +} +func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintV(rv2i(rv).(map[uint]uint), e) +} +func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) 
+ i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint8V(rv2i(rv).(map[uint]uint8), e) +} +func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint16V(rv2i(rv).(map[uint]uint16), e) +} +func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint32V(rv2i(rv).(map[uint]uint32), e) +} +func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint64V(rv2i(rv).(map[uint]uint64), e) +} +func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + 
ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintptrV(rv2i(rv).(map[uint]uintptr), e) +} +func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntV(rv2i(rv).(map[uint]int), e) +} +func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt8V(rv2i(rv).(map[uint]int8), e) +} +func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt16V(rv2i(rv).(map[uint]int16), e) +} +func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt32V(rv2i(rv).(map[uint]int32), e) +} +func (_ fastpathT) EncMapUintInt32V(v 
map[uint]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt64V(rv2i(rv).(map[uint]int64), e) +} +func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat32V(rv2i(rv).(map[uint]float32), e) +} +func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat64V(rv2i(rv).(map[uint]float64), e) +} +func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintBoolV(rv2i(rv).(map[uint]bool), e) +} +func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } 
+ ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e) +} +func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + 
ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e) +} +func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e) +} +func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e) +} +func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) { + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e) +} +func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e) +} +func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e) +} +func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e) +} +func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e) +} +func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + 
ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e) +} +func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e) +} +func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e) +} +func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e) +} +func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e) +} +func (_ fastpathT) EncMapUint16Uint32V(v 
map[uint16]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e) +} +func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e) +} +func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e) +} +func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e) +} +func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 
{ + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e) +} +func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e) +} +func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e) +} +func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e) +} +func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e) +} +func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e) +} +func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e) +} +func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e) +} +func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e) +} 
+func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e) +} +func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e) +} +func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e) +} +func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e) +} +func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v 
{ + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e) +} +func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e) +} +func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e) +} +func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e) +} +func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + 
if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e) +} +func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e) +} +func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e) +} +func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e) +} +func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e) +} +func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e) +} +func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + 
for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e) +} +func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e) +} +func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e) +} +func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() 
+ } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e) +} +func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e) +} +func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int64R(f 
*codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e) +} +func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e) +} +func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e) +} +func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e) +} +func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e) +} +func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), e) +} +func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), e) +} +func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, 
v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), e) +} +func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), e) +} +func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), e) +} +func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrIntV(rv2i(rv).(map[uintptr]int), e) +} +func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), e) +} +func (_ 
fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), e) +} +func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), e) +} +func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), e) +} +func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), e) +} +func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + 
ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), e) +} +func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), e) +} +func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUintR(f 
*codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUintV(rv2i(rv).(map[int]uint), e) +} +func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint16V(rv2i(rv).(map[int]uint16), e) +} +func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint32V(rv2i(rv).(map[int]uint32), e) +} +func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + 
i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUintptrV(rv2i(rv).(map[int]uintptr), e) +} +func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt8V(rv2i(rv).(map[int]int8), e) +} +func (_ fastpathT) EncMapIntInt8V(v map[int]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt16V(rv2i(rv).(map[int]int16), e) +} +func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + 
ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt64V(rv2i(rv).(map[int]int64), e) +} +func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntFloat32V(rv2i(rv).(map[int]float32), e) +} +func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + 
for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8IntfV(rv2i(rv).(map[int8]interface{}), e) +} +func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8StringV(rv2i(rv).(map[int8]string), e) +} +func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8UintV(rv2i(rv).(map[int8]uint), e) +} +func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint8V(rv2i(rv).(map[int8]uint8), e) +} +func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) 
+ if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint16V(rv2i(rv).(map[int8]uint16), e) +} +func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint32V(rv2i(rv).(map[int8]uint32), e) +} +func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint64V(rv2i(rv).(map[int8]uint64), e) +} +func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), e) +} +func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8IntV(rv2i(rv).(map[int8]int), e) +} +func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) { + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int8V(rv2i(rv).(map[int8]int8), e) +} +func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int16V(rv2i(rv).(map[int8]int16), e) +} +func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int32V(rv2i(rv).(map[int8]int32), e) +} +func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int64V(rv2i(rv).(map[int8]int64), e) +} +func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Float32V(rv2i(rv).(map[int8]float32), e) +} +func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Float64V(rv2i(rv).(map[int8]float64), e) +} +func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8BoolV(rv2i(rv).(map[int8]bool), e) +} +func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16IntfV(rv2i(rv).(map[int16]interface{}), e) +} +func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapInt16StringV(rv2i(rv).(map[int16]string), e) +} +func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16UintV(rv2i(rv).(map[int16]uint), e) +} +func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint8V(rv2i(rv).(map[int16]uint8), e) +} +func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint16V(rv2i(rv).(map[int16]uint16), e) +} +func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint32V(rv2i(rv).(map[int16]uint32), e) +} +func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := 
range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint64V(rv2i(rv).(map[int16]uint64), e) +} +func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), e) +} +func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16IntV(rv2i(rv).(map[int16]int), e) +} +func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int8V(rv2i(rv).(map[int16]int8), e) +} +func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int16V(rv2i(rv).(map[int16]int16), e) +} +func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int32V(rv2i(rv).(map[int16]int32), e) +} +func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int64V(rv2i(rv).(map[int16]int64), e) +} +func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Float32V(rv2i(rv).(map[int16]float32), e) +} +func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Float64V(rv2i(rv).(map[int16]float64), e) +} +func (_ fastpathT) 
EncMapInt16Float64V(v map[int16]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16BoolV(rv2i(rv).(map[int16]bool), e) +} +func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32UintV(rv2i(rv).(map[int32]uint), e) +} +func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint16V(rv2i(rv).(map[int32]uint16), e) +} +func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint32V(rv2i(rv).(map[int32]uint32), e) +} +func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), e) +} +func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (_ fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int8V(rv2i(rv).(map[int32]int8), e) +} +func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int16V(rv2i(rv).(map[int32]int16), e) +} +func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + 
ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int64V(rv2i(rv).(map[int32]int64), e) +} +func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Float32V(rv2i(rv).(map[int32]float32), e) +} +func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeBool(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64IntfV(rv2i(rv).(map[int64]interface{}), e) +} +func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64StringV(rv2i(rv).(map[int64]string), e) +} +func (_ fastpathT) EncMapInt64StringV(v map[int64]string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64UintV(rv2i(rv).(map[int64]uint), e) +} +func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint8V(rv2i(rv).(map[int64]uint8), e) +} +func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapInt64Uint16V(rv2i(rv).(map[int64]uint16), e) +} +func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint32V(rv2i(rv).(map[int64]uint32), e) +} +func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint64V(rv2i(rv).(map[int64]uint64), e) +} +func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), e) +} +func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64IntV(rv2i(rv).(map[int64]int), e) +} +func (_ fastpathT) EncMapInt64IntV(v map[int64]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) 
+ i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int8V(rv2i(rv).(map[int64]int8), e) +} +func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int16V(rv2i(rv).(map[int64]int16), e) +} +func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int32V(rv2i(rv).(map[int64]int32), e) +} +func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int64V(rv2i(rv).(map[int64]int64), e) +} +func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + 
if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float32V(rv2i(rv).(map[int64]float32), e) +} +func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float64V(rv2i(rv).(map[int64]float64), e) +} +func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64BoolV(rv2i(rv).(map[int64]bool), e) +} +func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntfV(rv2i(rv).(map[bool]interface{}), e) +} +func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolStringV(rv2i(rv).(map[bool]string), e) +} +func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) { + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintV(rv2i(rv).(map[bool]uint), e) +} +func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint8V(rv2i(rv).(map[bool]uint8), e) +} +func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint16V(rv2i(rv).(map[bool]uint16), e) +} +func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint32V(rv2i(rv).(map[bool]uint32), e) +} +func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } 
else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint64V(rv2i(rv).(map[bool]uint64), e) +} +func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), e) +} +func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntV(rv2i(rv).(map[bool]int), e) +} +func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt8V(rv2i(rv).(map[bool]int8), e) +} +func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt16V(rv2i(rv).(map[bool]int16), e) +} +func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) { + ee, 
esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt32V(rv2i(rv).(map[bool]int32), e) +} +func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt64V(rv2i(rv).(map[bool]int64), e) +} +func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat32V(rv2i(rv).(map[bool]float32), e) +} +func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat64V(rv2i(rv).(map[bool]float64), e) +} +func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[bool(k2)]) + } + } 
else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolBoolV(rv2i(rv).(map[bool]bool), e) +} +func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { + + case []interface{}: + fastpathTV.DecSliceIntfV(v, false, d) + case *[]interface{}: + if v2, changed2 := fastpathTV.DecSliceIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]interface{}: + fastpathTV.DecMapIntfIntfV(v, false, d) + case *map[interface{}]interface{}: + if v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]string: + fastpathTV.DecMapIntfStringV(v, false, d) + case *map[interface{}]string: + if v2, changed2 := fastpathTV.DecMapIntfStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uint: + fastpathTV.DecMapIntfUintV(v, false, d) + case *map[interface{}]uint: + if v2, changed2 := fastpathTV.DecMapIntfUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uint8: + fastpathTV.DecMapIntfUint8V(v, false, d) + case *map[interface{}]uint8: + if v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uint16: + fastpathTV.DecMapIntfUint16V(v, false, d) + case *map[interface{}]uint16: + if v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uint32: + fastpathTV.DecMapIntfUint32V(v, false, d) + case *map[interface{}]uint32: + if v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uint64: + fastpathTV.DecMapIntfUint64V(v, false, d) + case *map[interface{}]uint64: + if v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uintptr: + fastpathTV.DecMapIntfUintptrV(v, false, d) + case *map[interface{}]uintptr: + if v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]int: + fastpathTV.DecMapIntfIntV(v, false, d) + case *map[interface{}]int: + if v2, changed2 := fastpathTV.DecMapIntfIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]int8: + fastpathTV.DecMapIntfInt8V(v, false, d) + case *map[interface{}]int8: + if v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]int16: + fastpathTV.DecMapIntfInt16V(v, false, d) + case *map[interface{}]int16: + if v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]int32: + fastpathTV.DecMapIntfInt32V(v, false, d) + case *map[interface{}]int32: + if v2, changed2 := 
fastpathTV.DecMapIntfInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]int64: + fastpathTV.DecMapIntfInt64V(v, false, d) + case *map[interface{}]int64: + if v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]float32: + fastpathTV.DecMapIntfFloat32V(v, false, d) + case *map[interface{}]float32: + if v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]float64: + fastpathTV.DecMapIntfFloat64V(v, false, d) + case *map[interface{}]float64: + if v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]bool: + fastpathTV.DecMapIntfBoolV(v, false, d) + case *map[interface{}]bool: + if v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, true, d); changed2 { + *v = v2 + } + + case []string: + fastpathTV.DecSliceStringV(v, false, d) + case *[]string: + if v2, changed2 := fastpathTV.DecSliceStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]interface{}: + fastpathTV.DecMapStringIntfV(v, false, d) + case *map[string]interface{}: + if v2, changed2 := fastpathTV.DecMapStringIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]string: + fastpathTV.DecMapStringStringV(v, false, d) + case *map[string]string: + if v2, changed2 := fastpathTV.DecMapStringStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uint: + fastpathTV.DecMapStringUintV(v, false, d) + case *map[string]uint: + if v2, changed2 := fastpathTV.DecMapStringUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uint8: + fastpathTV.DecMapStringUint8V(v, false, d) + case *map[string]uint8: + if v2, changed2 := fastpathTV.DecMapStringUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uint16: + fastpathTV.DecMapStringUint16V(v, false, d) + case *map[string]uint16: + if v2, changed2 := fastpathTV.DecMapStringUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uint32: + fastpathTV.DecMapStringUint32V(v, false, d) + case *map[string]uint32: + if v2, changed2 := fastpathTV.DecMapStringUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uint64: + fastpathTV.DecMapStringUint64V(v, false, d) + case *map[string]uint64: + if v2, changed2 := fastpathTV.DecMapStringUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uintptr: + fastpathTV.DecMapStringUintptrV(v, false, d) + case *map[string]uintptr: + if v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]int: + fastpathTV.DecMapStringIntV(v, false, d) + case *map[string]int: + if v2, changed2 := fastpathTV.DecMapStringIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]int8: + fastpathTV.DecMapStringInt8V(v, false, d) + case *map[string]int8: + if v2, changed2 := fastpathTV.DecMapStringInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]int16: + fastpathTV.DecMapStringInt16V(v, false, d) + case *map[string]int16: + if v2, changed2 := fastpathTV.DecMapStringInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]int32: + fastpathTV.DecMapStringInt32V(v, false, d) + case *map[string]int32: + if v2, changed2 := fastpathTV.DecMapStringInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]int64: + fastpathTV.DecMapStringInt64V(v, false, d) + case *map[string]int64: + if v2, changed2 := fastpathTV.DecMapStringInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]float32: + fastpathTV.DecMapStringFloat32V(v, 
false, d) + case *map[string]float32: + if v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]float64: + fastpathTV.DecMapStringFloat64V(v, false, d) + case *map[string]float64: + if v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]bool: + fastpathTV.DecMapStringBoolV(v, false, d) + case *map[string]bool: + if v2, changed2 := fastpathTV.DecMapStringBoolV(*v, true, d); changed2 { + *v = v2 + } + + case []float32: + fastpathTV.DecSliceFloat32V(v, false, d) + case *[]float32: + if v2, changed2 := fastpathTV.DecSliceFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]interface{}: + fastpathTV.DecMapFloat32IntfV(v, false, d) + case *map[float32]interface{}: + if v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]string: + fastpathTV.DecMapFloat32StringV(v, false, d) + case *map[float32]string: + if v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uint: + fastpathTV.DecMapFloat32UintV(v, false, d) + case *map[float32]uint: + if v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uint8: + fastpathTV.DecMapFloat32Uint8V(v, false, d) + case *map[float32]uint8: + if v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uint16: + fastpathTV.DecMapFloat32Uint16V(v, false, d) + case *map[float32]uint16: + if v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uint32: + fastpathTV.DecMapFloat32Uint32V(v, false, d) + case *map[float32]uint32: + if v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uint64: + fastpathTV.DecMapFloat32Uint64V(v, false, d) + case *map[float32]uint64: + if v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uintptr: + fastpathTV.DecMapFloat32UintptrV(v, false, d) + case *map[float32]uintptr: + if v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]int: + fastpathTV.DecMapFloat32IntV(v, false, d) + case *map[float32]int: + if v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]int8: + fastpathTV.DecMapFloat32Int8V(v, false, d) + case *map[float32]int8: + if v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]int16: + fastpathTV.DecMapFloat32Int16V(v, false, d) + case *map[float32]int16: + if v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]int32: + fastpathTV.DecMapFloat32Int32V(v, false, d) + case *map[float32]int32: + if v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]int64: + fastpathTV.DecMapFloat32Int64V(v, false, d) + case *map[float32]int64: + if v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]float32: + fastpathTV.DecMapFloat32Float32V(v, false, d) + case *map[float32]float32: + if v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]float64: + fastpathTV.DecMapFloat32Float64V(v, false, d) + case *map[float32]float64: + if v2, changed2 := 
fastpathTV.DecMapFloat32Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]bool: + fastpathTV.DecMapFloat32BoolV(v, false, d) + case *map[float32]bool: + if v2, changed2 := fastpathTV.DecMapFloat32BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []float64: + fastpathTV.DecSliceFloat64V(v, false, d) + case *[]float64: + if v2, changed2 := fastpathTV.DecSliceFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]interface{}: + fastpathTV.DecMapFloat64IntfV(v, false, d) + case *map[float64]interface{}: + if v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]string: + fastpathTV.DecMapFloat64StringV(v, false, d) + case *map[float64]string: + if v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uint: + fastpathTV.DecMapFloat64UintV(v, false, d) + case *map[float64]uint: + if v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uint8: + fastpathTV.DecMapFloat64Uint8V(v, false, d) + case *map[float64]uint8: + if v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uint16: + fastpathTV.DecMapFloat64Uint16V(v, false, d) + case *map[float64]uint16: + if v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uint32: + fastpathTV.DecMapFloat64Uint32V(v, false, d) + case *map[float64]uint32: + if v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uint64: + fastpathTV.DecMapFloat64Uint64V(v, false, d) + case *map[float64]uint64: + if v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uintptr: + fastpathTV.DecMapFloat64UintptrV(v, false, d) + case *map[float64]uintptr: + if v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]int: + fastpathTV.DecMapFloat64IntV(v, false, d) + case *map[float64]int: + if v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]int8: + fastpathTV.DecMapFloat64Int8V(v, false, d) + case *map[float64]int8: + if v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]int16: + fastpathTV.DecMapFloat64Int16V(v, false, d) + case *map[float64]int16: + if v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]int32: + fastpathTV.DecMapFloat64Int32V(v, false, d) + case *map[float64]int32: + if v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]int64: + fastpathTV.DecMapFloat64Int64V(v, false, d) + case *map[float64]int64: + if v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]float32: + fastpathTV.DecMapFloat64Float32V(v, false, d) + case *map[float64]float32: + if v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]float64: + fastpathTV.DecMapFloat64Float64V(v, false, d) + case *map[float64]float64: + if v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]bool: + fastpathTV.DecMapFloat64BoolV(v, false, d) + case *map[float64]bool: + if v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []uint: + 
fastpathTV.DecSliceUintV(v, false, d) + case *[]uint: + if v2, changed2 := fastpathTV.DecSliceUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]interface{}: + fastpathTV.DecMapUintIntfV(v, false, d) + case *map[uint]interface{}: + if v2, changed2 := fastpathTV.DecMapUintIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]string: + fastpathTV.DecMapUintStringV(v, false, d) + case *map[uint]string: + if v2, changed2 := fastpathTV.DecMapUintStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uint: + fastpathTV.DecMapUintUintV(v, false, d) + case *map[uint]uint: + if v2, changed2 := fastpathTV.DecMapUintUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uint8: + fastpathTV.DecMapUintUint8V(v, false, d) + case *map[uint]uint8: + if v2, changed2 := fastpathTV.DecMapUintUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uint16: + fastpathTV.DecMapUintUint16V(v, false, d) + case *map[uint]uint16: + if v2, changed2 := fastpathTV.DecMapUintUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uint32: + fastpathTV.DecMapUintUint32V(v, false, d) + case *map[uint]uint32: + if v2, changed2 := fastpathTV.DecMapUintUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uint64: + fastpathTV.DecMapUintUint64V(v, false, d) + case *map[uint]uint64: + if v2, changed2 := fastpathTV.DecMapUintUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uintptr: + fastpathTV.DecMapUintUintptrV(v, false, d) + case *map[uint]uintptr: + if v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]int: + fastpathTV.DecMapUintIntV(v, false, d) + case *map[uint]int: + if v2, changed2 := fastpathTV.DecMapUintIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]int8: + fastpathTV.DecMapUintInt8V(v, false, d) + case *map[uint]int8: + if v2, changed2 := fastpathTV.DecMapUintInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]int16: + fastpathTV.DecMapUintInt16V(v, false, d) + case *map[uint]int16: + if v2, changed2 := fastpathTV.DecMapUintInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]int32: + fastpathTV.DecMapUintInt32V(v, false, d) + case *map[uint]int32: + if v2, changed2 := fastpathTV.DecMapUintInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]int64: + fastpathTV.DecMapUintInt64V(v, false, d) + case *map[uint]int64: + if v2, changed2 := fastpathTV.DecMapUintInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]float32: + fastpathTV.DecMapUintFloat32V(v, false, d) + case *map[uint]float32: + if v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]float64: + fastpathTV.DecMapUintFloat64V(v, false, d) + case *map[uint]float64: + if v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]bool: + fastpathTV.DecMapUintBoolV(v, false, d) + case *map[uint]bool: + if v2, changed2 := fastpathTV.DecMapUintBoolV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]interface{}: + fastpathTV.DecMapUint8IntfV(v, false, d) + case *map[uint8]interface{}: + if v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]string: + fastpathTV.DecMapUint8StringV(v, false, d) + case *map[uint8]string: + if v2, changed2 := fastpathTV.DecMapUint8StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uint: + fastpathTV.DecMapUint8UintV(v, false, d) + case *map[uint8]uint: + if v2, changed2 := 
fastpathTV.DecMapUint8UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uint8: + fastpathTV.DecMapUint8Uint8V(v, false, d) + case *map[uint8]uint8: + if v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uint16: + fastpathTV.DecMapUint8Uint16V(v, false, d) + case *map[uint8]uint16: + if v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uint32: + fastpathTV.DecMapUint8Uint32V(v, false, d) + case *map[uint8]uint32: + if v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uint64: + fastpathTV.DecMapUint8Uint64V(v, false, d) + case *map[uint8]uint64: + if v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uintptr: + fastpathTV.DecMapUint8UintptrV(v, false, d) + case *map[uint8]uintptr: + if v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]int: + fastpathTV.DecMapUint8IntV(v, false, d) + case *map[uint8]int: + if v2, changed2 := fastpathTV.DecMapUint8IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]int8: + fastpathTV.DecMapUint8Int8V(v, false, d) + case *map[uint8]int8: + if v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]int16: + fastpathTV.DecMapUint8Int16V(v, false, d) + case *map[uint8]int16: + if v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]int32: + fastpathTV.DecMapUint8Int32V(v, false, d) + case *map[uint8]int32: + if v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]int64: + fastpathTV.DecMapUint8Int64V(v, false, d) + case *map[uint8]int64: + if v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]float32: + fastpathTV.DecMapUint8Float32V(v, false, d) + case *map[uint8]float32: + if v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]float64: + fastpathTV.DecMapUint8Float64V(v, false, d) + case *map[uint8]float64: + if v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]bool: + fastpathTV.DecMapUint8BoolV(v, false, d) + case *map[uint8]bool: + if v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []uint16: + fastpathTV.DecSliceUint16V(v, false, d) + case *[]uint16: + if v2, changed2 := fastpathTV.DecSliceUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]interface{}: + fastpathTV.DecMapUint16IntfV(v, false, d) + case *map[uint16]interface{}: + if v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]string: + fastpathTV.DecMapUint16StringV(v, false, d) + case *map[uint16]string: + if v2, changed2 := fastpathTV.DecMapUint16StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uint: + fastpathTV.DecMapUint16UintV(v, false, d) + case *map[uint16]uint: + if v2, changed2 := fastpathTV.DecMapUint16UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uint8: + fastpathTV.DecMapUint16Uint8V(v, false, d) + case *map[uint16]uint8: + if v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uint16: + fastpathTV.DecMapUint16Uint16V(v, false, d) + case *map[uint16]uint16: + if v2, changed2 := 
fastpathTV.DecMapUint16Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uint32: + fastpathTV.DecMapUint16Uint32V(v, false, d) + case *map[uint16]uint32: + if v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uint64: + fastpathTV.DecMapUint16Uint64V(v, false, d) + case *map[uint16]uint64: + if v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uintptr: + fastpathTV.DecMapUint16UintptrV(v, false, d) + case *map[uint16]uintptr: + if v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]int: + fastpathTV.DecMapUint16IntV(v, false, d) + case *map[uint16]int: + if v2, changed2 := fastpathTV.DecMapUint16IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]int8: + fastpathTV.DecMapUint16Int8V(v, false, d) + case *map[uint16]int8: + if v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]int16: + fastpathTV.DecMapUint16Int16V(v, false, d) + case *map[uint16]int16: + if v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]int32: + fastpathTV.DecMapUint16Int32V(v, false, d) + case *map[uint16]int32: + if v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]int64: + fastpathTV.DecMapUint16Int64V(v, false, d) + case *map[uint16]int64: + if v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]float32: + fastpathTV.DecMapUint16Float32V(v, false, d) + case *map[uint16]float32: + if v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]float64: + fastpathTV.DecMapUint16Float64V(v, false, d) + case *map[uint16]float64: + if v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]bool: + fastpathTV.DecMapUint16BoolV(v, false, d) + case *map[uint16]bool: + if v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []uint32: + fastpathTV.DecSliceUint32V(v, false, d) + case *[]uint32: + if v2, changed2 := fastpathTV.DecSliceUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]interface{}: + fastpathTV.DecMapUint32IntfV(v, false, d) + case *map[uint32]interface{}: + if v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]string: + fastpathTV.DecMapUint32StringV(v, false, d) + case *map[uint32]string: + if v2, changed2 := fastpathTV.DecMapUint32StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uint: + fastpathTV.DecMapUint32UintV(v, false, d) + case *map[uint32]uint: + if v2, changed2 := fastpathTV.DecMapUint32UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uint8: + fastpathTV.DecMapUint32Uint8V(v, false, d) + case *map[uint32]uint8: + if v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uint16: + fastpathTV.DecMapUint32Uint16V(v, false, d) + case *map[uint32]uint16: + if v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uint32: + fastpathTV.DecMapUint32Uint32V(v, false, d) + case *map[uint32]uint32: + if v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uint64: + fastpathTV.DecMapUint32Uint64V(v, false, d) + case 
*map[uint32]uint64: + if v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uintptr: + fastpathTV.DecMapUint32UintptrV(v, false, d) + case *map[uint32]uintptr: + if v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]int: + fastpathTV.DecMapUint32IntV(v, false, d) + case *map[uint32]int: + if v2, changed2 := fastpathTV.DecMapUint32IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]int8: + fastpathTV.DecMapUint32Int8V(v, false, d) + case *map[uint32]int8: + if v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]int16: + fastpathTV.DecMapUint32Int16V(v, false, d) + case *map[uint32]int16: + if v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]int32: + fastpathTV.DecMapUint32Int32V(v, false, d) + case *map[uint32]int32: + if v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]int64: + fastpathTV.DecMapUint32Int64V(v, false, d) + case *map[uint32]int64: + if v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]float32: + fastpathTV.DecMapUint32Float32V(v, false, d) + case *map[uint32]float32: + if v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]float64: + fastpathTV.DecMapUint32Float64V(v, false, d) + case *map[uint32]float64: + if v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]bool: + fastpathTV.DecMapUint32BoolV(v, false, d) + case *map[uint32]bool: + if v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []uint64: + fastpathTV.DecSliceUint64V(v, false, d) + case *[]uint64: + if v2, changed2 := fastpathTV.DecSliceUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]interface{}: + fastpathTV.DecMapUint64IntfV(v, false, d) + case *map[uint64]interface{}: + if v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]string: + fastpathTV.DecMapUint64StringV(v, false, d) + case *map[uint64]string: + if v2, changed2 := fastpathTV.DecMapUint64StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uint: + fastpathTV.DecMapUint64UintV(v, false, d) + case *map[uint64]uint: + if v2, changed2 := fastpathTV.DecMapUint64UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uint8: + fastpathTV.DecMapUint64Uint8V(v, false, d) + case *map[uint64]uint8: + if v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uint16: + fastpathTV.DecMapUint64Uint16V(v, false, d) + case *map[uint64]uint16: + if v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uint32: + fastpathTV.DecMapUint64Uint32V(v, false, d) + case *map[uint64]uint32: + if v2, changed2 := fastpathTV.DecMapUint64Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uint64: + fastpathTV.DecMapUint64Uint64V(v, false, d) + case *map[uint64]uint64: + if v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uintptr: + fastpathTV.DecMapUint64UintptrV(v, false, d) + case *map[uint64]uintptr: + if v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]int: + 
fastpathTV.DecMapUint64IntV(v, false, d) + case *map[uint64]int: + if v2, changed2 := fastpathTV.DecMapUint64IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]int8: + fastpathTV.DecMapUint64Int8V(v, false, d) + case *map[uint64]int8: + if v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]int16: + fastpathTV.DecMapUint64Int16V(v, false, d) + case *map[uint64]int16: + if v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]int32: + fastpathTV.DecMapUint64Int32V(v, false, d) + case *map[uint64]int32: + if v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]int64: + fastpathTV.DecMapUint64Int64V(v, false, d) + case *map[uint64]int64: + if v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]float32: + fastpathTV.DecMapUint64Float32V(v, false, d) + case *map[uint64]float32: + if v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]float64: + fastpathTV.DecMapUint64Float64V(v, false, d) + case *map[uint64]float64: + if v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]bool: + fastpathTV.DecMapUint64BoolV(v, false, d) + case *map[uint64]bool: + if v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []uintptr: + fastpathTV.DecSliceUintptrV(v, false, d) + case *[]uintptr: + if v2, changed2 := fastpathTV.DecSliceUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]interface{}: + fastpathTV.DecMapUintptrIntfV(v, false, d) + case *map[uintptr]interface{}: + if v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]string: + fastpathTV.DecMapUintptrStringV(v, false, d) + case *map[uintptr]string: + if v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uint: + fastpathTV.DecMapUintptrUintV(v, false, d) + case *map[uintptr]uint: + if v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uint8: + fastpathTV.DecMapUintptrUint8V(v, false, d) + case *map[uintptr]uint8: + if v2, changed2 := fastpathTV.DecMapUintptrUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uint16: + fastpathTV.DecMapUintptrUint16V(v, false, d) + case *map[uintptr]uint16: + if v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uint32: + fastpathTV.DecMapUintptrUint32V(v, false, d) + case *map[uintptr]uint32: + if v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uint64: + fastpathTV.DecMapUintptrUint64V(v, false, d) + case *map[uintptr]uint64: + if v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uintptr: + fastpathTV.DecMapUintptrUintptrV(v, false, d) + case *map[uintptr]uintptr: + if v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]int: + fastpathTV.DecMapUintptrIntV(v, false, d) + case *map[uintptr]int: + if v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]int8: + fastpathTV.DecMapUintptrInt8V(v, false, d) + case *map[uintptr]int8: + if v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, 
true, d); changed2 { + *v = v2 + } + + case map[uintptr]int16: + fastpathTV.DecMapUintptrInt16V(v, false, d) + case *map[uintptr]int16: + if v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]int32: + fastpathTV.DecMapUintptrInt32V(v, false, d) + case *map[uintptr]int32: + if v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]int64: + fastpathTV.DecMapUintptrInt64V(v, false, d) + case *map[uintptr]int64: + if v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]float32: + fastpathTV.DecMapUintptrFloat32V(v, false, d) + case *map[uintptr]float32: + if v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]float64: + fastpathTV.DecMapUintptrFloat64V(v, false, d) + case *map[uintptr]float64: + if v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]bool: + fastpathTV.DecMapUintptrBoolV(v, false, d) + case *map[uintptr]bool: + if v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, true, d); changed2 { + *v = v2 + } + + case []int: + fastpathTV.DecSliceIntV(v, false, d) + case *[]int: + if v2, changed2 := fastpathTV.DecSliceIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]interface{}: + fastpathTV.DecMapIntIntfV(v, false, d) + case *map[int]interface{}: + if v2, changed2 := fastpathTV.DecMapIntIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]string: + fastpathTV.DecMapIntStringV(v, false, d) + case *map[int]string: + if v2, changed2 := fastpathTV.DecMapIntStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uint: + fastpathTV.DecMapIntUintV(v, false, d) + case *map[int]uint: + if v2, changed2 := fastpathTV.DecMapIntUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uint8: + fastpathTV.DecMapIntUint8V(v, false, d) + case *map[int]uint8: + if v2, changed2 := fastpathTV.DecMapIntUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uint16: + fastpathTV.DecMapIntUint16V(v, false, d) + case *map[int]uint16: + if v2, changed2 := fastpathTV.DecMapIntUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uint32: + fastpathTV.DecMapIntUint32V(v, false, d) + case *map[int]uint32: + if v2, changed2 := fastpathTV.DecMapIntUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uint64: + fastpathTV.DecMapIntUint64V(v, false, d) + case *map[int]uint64: + if v2, changed2 := fastpathTV.DecMapIntUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uintptr: + fastpathTV.DecMapIntUintptrV(v, false, d) + case *map[int]uintptr: + if v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]int: + fastpathTV.DecMapIntIntV(v, false, d) + case *map[int]int: + if v2, changed2 := fastpathTV.DecMapIntIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]int8: + fastpathTV.DecMapIntInt8V(v, false, d) + case *map[int]int8: + if v2, changed2 := fastpathTV.DecMapIntInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]int16: + fastpathTV.DecMapIntInt16V(v, false, d) + case *map[int]int16: + if v2, changed2 := fastpathTV.DecMapIntInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]int32: + fastpathTV.DecMapIntInt32V(v, false, d) + case *map[int]int32: + if v2, changed2 := fastpathTV.DecMapIntInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]int64: + fastpathTV.DecMapIntInt64V(v, false, d) + 
case *map[int]int64: + if v2, changed2 := fastpathTV.DecMapIntInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]float32: + fastpathTV.DecMapIntFloat32V(v, false, d) + case *map[int]float32: + if v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]float64: + fastpathTV.DecMapIntFloat64V(v, false, d) + case *map[int]float64: + if v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]bool: + fastpathTV.DecMapIntBoolV(v, false, d) + case *map[int]bool: + if v2, changed2 := fastpathTV.DecMapIntBoolV(*v, true, d); changed2 { + *v = v2 + } + + case []int8: + fastpathTV.DecSliceInt8V(v, false, d) + case *[]int8: + if v2, changed2 := fastpathTV.DecSliceInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]interface{}: + fastpathTV.DecMapInt8IntfV(v, false, d) + case *map[int8]interface{}: + if v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]string: + fastpathTV.DecMapInt8StringV(v, false, d) + case *map[int8]string: + if v2, changed2 := fastpathTV.DecMapInt8StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uint: + fastpathTV.DecMapInt8UintV(v, false, d) + case *map[int8]uint: + if v2, changed2 := fastpathTV.DecMapInt8UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uint8: + fastpathTV.DecMapInt8Uint8V(v, false, d) + case *map[int8]uint8: + if v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uint16: + fastpathTV.DecMapInt8Uint16V(v, false, d) + case *map[int8]uint16: + if v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uint32: + fastpathTV.DecMapInt8Uint32V(v, false, d) + case *map[int8]uint32: + if v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uint64: + fastpathTV.DecMapInt8Uint64V(v, false, d) + case *map[int8]uint64: + if v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uintptr: + fastpathTV.DecMapInt8UintptrV(v, false, d) + case *map[int8]uintptr: + if v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]int: + fastpathTV.DecMapInt8IntV(v, false, d) + case *map[int8]int: + if v2, changed2 := fastpathTV.DecMapInt8IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]int8: + fastpathTV.DecMapInt8Int8V(v, false, d) + case *map[int8]int8: + if v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]int16: + fastpathTV.DecMapInt8Int16V(v, false, d) + case *map[int8]int16: + if v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]int32: + fastpathTV.DecMapInt8Int32V(v, false, d) + case *map[int8]int32: + if v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]int64: + fastpathTV.DecMapInt8Int64V(v, false, d) + case *map[int8]int64: + if v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]float32: + fastpathTV.DecMapInt8Float32V(v, false, d) + case *map[int8]float32: + if v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]float64: + fastpathTV.DecMapInt8Float64V(v, false, d) + case *map[int8]float64: + if v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, true, d); changed2 { + *v = v2 + } + + case 
map[int8]bool: + fastpathTV.DecMapInt8BoolV(v, false, d) + case *map[int8]bool: + if v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []int16: + fastpathTV.DecSliceInt16V(v, false, d) + case *[]int16: + if v2, changed2 := fastpathTV.DecSliceInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]interface{}: + fastpathTV.DecMapInt16IntfV(v, false, d) + case *map[int16]interface{}: + if v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]string: + fastpathTV.DecMapInt16StringV(v, false, d) + case *map[int16]string: + if v2, changed2 := fastpathTV.DecMapInt16StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uint: + fastpathTV.DecMapInt16UintV(v, false, d) + case *map[int16]uint: + if v2, changed2 := fastpathTV.DecMapInt16UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uint8: + fastpathTV.DecMapInt16Uint8V(v, false, d) + case *map[int16]uint8: + if v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uint16: + fastpathTV.DecMapInt16Uint16V(v, false, d) + case *map[int16]uint16: + if v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uint32: + fastpathTV.DecMapInt16Uint32V(v, false, d) + case *map[int16]uint32: + if v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uint64: + fastpathTV.DecMapInt16Uint64V(v, false, d) + case *map[int16]uint64: + if v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uintptr: + fastpathTV.DecMapInt16UintptrV(v, false, d) + case *map[int16]uintptr: + if v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]int: + fastpathTV.DecMapInt16IntV(v, false, d) + case *map[int16]int: + if v2, changed2 := fastpathTV.DecMapInt16IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]int8: + fastpathTV.DecMapInt16Int8V(v, false, d) + case *map[int16]int8: + if v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]int16: + fastpathTV.DecMapInt16Int16V(v, false, d) + case *map[int16]int16: + if v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]int32: + fastpathTV.DecMapInt16Int32V(v, false, d) + case *map[int16]int32: + if v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]int64: + fastpathTV.DecMapInt16Int64V(v, false, d) + case *map[int16]int64: + if v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]float32: + fastpathTV.DecMapInt16Float32V(v, false, d) + case *map[int16]float32: + if v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]float64: + fastpathTV.DecMapInt16Float64V(v, false, d) + case *map[int16]float64: + if v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]bool: + fastpathTV.DecMapInt16BoolV(v, false, d) + case *map[int16]bool: + if v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []int32: + fastpathTV.DecSliceInt32V(v, false, d) + case *[]int32: + if v2, changed2 := fastpathTV.DecSliceInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]interface{}: + fastpathTV.DecMapInt32IntfV(v, false, d) + case 
*map[int32]interface{}: + if v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]string: + fastpathTV.DecMapInt32StringV(v, false, d) + case *map[int32]string: + if v2, changed2 := fastpathTV.DecMapInt32StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uint: + fastpathTV.DecMapInt32UintV(v, false, d) + case *map[int32]uint: + if v2, changed2 := fastpathTV.DecMapInt32UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uint8: + fastpathTV.DecMapInt32Uint8V(v, false, d) + case *map[int32]uint8: + if v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uint16: + fastpathTV.DecMapInt32Uint16V(v, false, d) + case *map[int32]uint16: + if v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uint32: + fastpathTV.DecMapInt32Uint32V(v, false, d) + case *map[int32]uint32: + if v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uint64: + fastpathTV.DecMapInt32Uint64V(v, false, d) + case *map[int32]uint64: + if v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uintptr: + fastpathTV.DecMapInt32UintptrV(v, false, d) + case *map[int32]uintptr: + if v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]int: + fastpathTV.DecMapInt32IntV(v, false, d) + case *map[int32]int: + if v2, changed2 := fastpathTV.DecMapInt32IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]int8: + fastpathTV.DecMapInt32Int8V(v, false, d) + case *map[int32]int8: + if v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]int16: + fastpathTV.DecMapInt32Int16V(v, false, d) + case *map[int32]int16: + if v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]int32: + fastpathTV.DecMapInt32Int32V(v, false, d) + case *map[int32]int32: + if v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]int64: + fastpathTV.DecMapInt32Int64V(v, false, d) + case *map[int32]int64: + if v2, changed2 := fastpathTV.DecMapInt32Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]float32: + fastpathTV.DecMapInt32Float32V(v, false, d) + case *map[int32]float32: + if v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]float64: + fastpathTV.DecMapInt32Float64V(v, false, d) + case *map[int32]float64: + if v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]bool: + fastpathTV.DecMapInt32BoolV(v, false, d) + case *map[int32]bool: + if v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []int64: + fastpathTV.DecSliceInt64V(v, false, d) + case *[]int64: + if v2, changed2 := fastpathTV.DecSliceInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]interface{}: + fastpathTV.DecMapInt64IntfV(v, false, d) + case *map[int64]interface{}: + if v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]string: + fastpathTV.DecMapInt64StringV(v, false, d) + case *map[int64]string: + if v2, changed2 := fastpathTV.DecMapInt64StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uint: + fastpathTV.DecMapInt64UintV(v, false, d) + case *map[int64]uint: + if v2, changed2 := 
fastpathTV.DecMapInt64UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uint8: + fastpathTV.DecMapInt64Uint8V(v, false, d) + case *map[int64]uint8: + if v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uint16: + fastpathTV.DecMapInt64Uint16V(v, false, d) + case *map[int64]uint16: + if v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uint32: + fastpathTV.DecMapInt64Uint32V(v, false, d) + case *map[int64]uint32: + if v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uint64: + fastpathTV.DecMapInt64Uint64V(v, false, d) + case *map[int64]uint64: + if v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uintptr: + fastpathTV.DecMapInt64UintptrV(v, false, d) + case *map[int64]uintptr: + if v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]int: + fastpathTV.DecMapInt64IntV(v, false, d) + case *map[int64]int: + if v2, changed2 := fastpathTV.DecMapInt64IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]int8: + fastpathTV.DecMapInt64Int8V(v, false, d) + case *map[int64]int8: + if v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]int16: + fastpathTV.DecMapInt64Int16V(v, false, d) + case *map[int64]int16: + if v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]int32: + fastpathTV.DecMapInt64Int32V(v, false, d) + case *map[int64]int32: + if v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]int64: + fastpathTV.DecMapInt64Int64V(v, false, d) + case *map[int64]int64: + if v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]float32: + fastpathTV.DecMapInt64Float32V(v, false, d) + case *map[int64]float32: + if v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]float64: + fastpathTV.DecMapInt64Float64V(v, false, d) + case *map[int64]float64: + if v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]bool: + fastpathTV.DecMapInt64BoolV(v, false, d) + case *map[int64]bool: + if v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []bool: + fastpathTV.DecSliceBoolV(v, false, d) + case *[]bool: + if v2, changed2 := fastpathTV.DecSliceBoolV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]interface{}: + fastpathTV.DecMapBoolIntfV(v, false, d) + case *map[bool]interface{}: + if v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]string: + fastpathTV.DecMapBoolStringV(v, false, d) + case *map[bool]string: + if v2, changed2 := fastpathTV.DecMapBoolStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uint: + fastpathTV.DecMapBoolUintV(v, false, d) + case *map[bool]uint: + if v2, changed2 := fastpathTV.DecMapBoolUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uint8: + fastpathTV.DecMapBoolUint8V(v, false, d) + case *map[bool]uint8: + if v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uint16: + fastpathTV.DecMapBoolUint16V(v, false, d) + case *map[bool]uint16: + if v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, true, d); changed2 { + *v = v2 + } + + 
case map[bool]uint32: + fastpathTV.DecMapBoolUint32V(v, false, d) + case *map[bool]uint32: + if v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uint64: + fastpathTV.DecMapBoolUint64V(v, false, d) + case *map[bool]uint64: + if v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uintptr: + fastpathTV.DecMapBoolUintptrV(v, false, d) + case *map[bool]uintptr: + if v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]int: + fastpathTV.DecMapBoolIntV(v, false, d) + case *map[bool]int: + if v2, changed2 := fastpathTV.DecMapBoolIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]int8: + fastpathTV.DecMapBoolInt8V(v, false, d) + case *map[bool]int8: + if v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]int16: + fastpathTV.DecMapBoolInt16V(v, false, d) + case *map[bool]int16: + if v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]int32: + fastpathTV.DecMapBoolInt32V(v, false, d) + case *map[bool]int32: + if v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]int64: + fastpathTV.DecMapBoolInt64V(v, false, d) + case *map[bool]int64: + if v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]float32: + fastpathTV.DecMapBoolFloat32V(v, false, d) + case *map[bool]float32: + if v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]float64: + fastpathTV.DecMapBoolFloat64V(v, false, d) + case *map[bool]float64: + if v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]bool: + fastpathTV.DecMapBoolBoolV(v, false, d) + case *map[bool]bool: + if v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, true, d); changed2 { + *v = v2 + } + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { + + case *[]interface{}: + *v = nil + + case *map[interface{}]interface{}: + *v = nil + + case *map[interface{}]string: + *v = nil + + case *map[interface{}]uint: + *v = nil + + case *map[interface{}]uint8: + *v = nil + + case *map[interface{}]uint16: + *v = nil + + case *map[interface{}]uint32: + *v = nil + + case *map[interface{}]uint64: + *v = nil + + case *map[interface{}]uintptr: + *v = nil + + case *map[interface{}]int: + *v = nil + + case *map[interface{}]int8: + *v = nil + + case *map[interface{}]int16: + *v = nil + + case *map[interface{}]int32: + *v = nil + + case *map[interface{}]int64: + *v = nil + + case *map[interface{}]float32: + *v = nil + + case *map[interface{}]float64: + *v = nil + + case *map[interface{}]bool: + *v = nil + + case *[]string: + *v = nil + + case *map[string]interface{}: + *v = nil + + case *map[string]string: + *v = nil + + case *map[string]uint: + *v = nil + + case *map[string]uint8: + *v = nil + + case *map[string]uint16: + *v = nil + + case *map[string]uint32: + *v = nil + + case *map[string]uint64: + *v = nil + + case *map[string]uintptr: + *v = nil + + case *map[string]int: + *v = nil + + case *map[string]int8: + *v = nil + + case *map[string]int16: + *v = nil + + case *map[string]int32: + *v = nil + + case *map[string]int64: + *v = nil + + case 
*map[string]float32: + *v = nil + + case *map[string]float64: + *v = nil + + case *map[string]bool: + *v = nil + + case *[]float32: + *v = nil + + case *map[float32]interface{}: + *v = nil + + case *map[float32]string: + *v = nil + + case *map[float32]uint: + *v = nil + + case *map[float32]uint8: + *v = nil + + case *map[float32]uint16: + *v = nil + + case *map[float32]uint32: + *v = nil + + case *map[float32]uint64: + *v = nil + + case *map[float32]uintptr: + *v = nil + + case *map[float32]int: + *v = nil + + case *map[float32]int8: + *v = nil + + case *map[float32]int16: + *v = nil + + case *map[float32]int32: + *v = nil + + case *map[float32]int64: + *v = nil + + case *map[float32]float32: + *v = nil + + case *map[float32]float64: + *v = nil + + case *map[float32]bool: + *v = nil + + case *[]float64: + *v = nil + + case *map[float64]interface{}: + *v = nil + + case *map[float64]string: + *v = nil + + case *map[float64]uint: + *v = nil + + case *map[float64]uint8: + *v = nil + + case *map[float64]uint16: + *v = nil + + case *map[float64]uint32: + *v = nil + + case *map[float64]uint64: + *v = nil + + case *map[float64]uintptr: + *v = nil + + case *map[float64]int: + *v = nil + + case *map[float64]int8: + *v = nil + + case *map[float64]int16: + *v = nil + + case *map[float64]int32: + *v = nil + + case *map[float64]int64: + *v = nil + + case *map[float64]float32: + *v = nil + + case *map[float64]float64: + *v = nil + + case *map[float64]bool: + *v = nil + + case *[]uint: + *v = nil + + case *map[uint]interface{}: + *v = nil + + case *map[uint]string: + *v = nil + + case *map[uint]uint: + *v = nil + + case *map[uint]uint8: + *v = nil + + case *map[uint]uint16: + *v = nil + + case *map[uint]uint32: + *v = nil + + case *map[uint]uint64: + *v = nil + + case *map[uint]uintptr: + *v = nil + + case *map[uint]int: + *v = nil + + case *map[uint]int8: + *v = nil + + case *map[uint]int16: + *v = nil + + case *map[uint]int32: + *v = nil + + case *map[uint]int64: + *v = nil + + case *map[uint]float32: + *v = nil + + case *map[uint]float64: + *v = nil + + case *map[uint]bool: + *v = nil + + case *map[uint8]interface{}: + *v = nil + + case *map[uint8]string: + *v = nil + + case *map[uint8]uint: + *v = nil + + case *map[uint8]uint8: + *v = nil + + case *map[uint8]uint16: + *v = nil + + case *map[uint8]uint32: + *v = nil + + case *map[uint8]uint64: + *v = nil + + case *map[uint8]uintptr: + *v = nil + + case *map[uint8]int: + *v = nil + + case *map[uint8]int8: + *v = nil + + case *map[uint8]int16: + *v = nil + + case *map[uint8]int32: + *v = nil + + case *map[uint8]int64: + *v = nil + + case *map[uint8]float32: + *v = nil + + case *map[uint8]float64: + *v = nil + + case *map[uint8]bool: + *v = nil + + case *[]uint16: + *v = nil + + case *map[uint16]interface{}: + *v = nil + + case *map[uint16]string: + *v = nil + + case *map[uint16]uint: + *v = nil + + case *map[uint16]uint8: + *v = nil + + case *map[uint16]uint16: + *v = nil + + case *map[uint16]uint32: + *v = nil + + case *map[uint16]uint64: + *v = nil + + case *map[uint16]uintptr: + *v = nil + + case *map[uint16]int: + *v = nil + + case *map[uint16]int8: + *v = nil + + case *map[uint16]int16: + *v = nil + + case *map[uint16]int32: + *v = nil + + case *map[uint16]int64: + *v = nil + + case *map[uint16]float32: + *v = nil + + case *map[uint16]float64: + *v = nil + + case *map[uint16]bool: + *v = nil + + case *[]uint32: + *v = nil + + case *map[uint32]interface{}: + *v = nil + + case *map[uint32]string: + *v = nil + + case *map[uint32]uint: + *v = nil + + 
case *map[uint32]uint8: + *v = nil + + case *map[uint32]uint16: + *v = nil + + case *map[uint32]uint32: + *v = nil + + case *map[uint32]uint64: + *v = nil + + case *map[uint32]uintptr: + *v = nil + + case *map[uint32]int: + *v = nil + + case *map[uint32]int8: + *v = nil + + case *map[uint32]int16: + *v = nil + + case *map[uint32]int32: + *v = nil + + case *map[uint32]int64: + *v = nil + + case *map[uint32]float32: + *v = nil + + case *map[uint32]float64: + *v = nil + + case *map[uint32]bool: + *v = nil + + case *[]uint64: + *v = nil + + case *map[uint64]interface{}: + *v = nil + + case *map[uint64]string: + *v = nil + + case *map[uint64]uint: + *v = nil + + case *map[uint64]uint8: + *v = nil + + case *map[uint64]uint16: + *v = nil + + case *map[uint64]uint32: + *v = nil + + case *map[uint64]uint64: + *v = nil + + case *map[uint64]uintptr: + *v = nil + + case *map[uint64]int: + *v = nil + + case *map[uint64]int8: + *v = nil + + case *map[uint64]int16: + *v = nil + + case *map[uint64]int32: + *v = nil + + case *map[uint64]int64: + *v = nil + + case *map[uint64]float32: + *v = nil + + case *map[uint64]float64: + *v = nil + + case *map[uint64]bool: + *v = nil + + case *[]uintptr: + *v = nil + + case *map[uintptr]interface{}: + *v = nil + + case *map[uintptr]string: + *v = nil + + case *map[uintptr]uint: + *v = nil + + case *map[uintptr]uint8: + *v = nil + + case *map[uintptr]uint16: + *v = nil + + case *map[uintptr]uint32: + *v = nil + + case *map[uintptr]uint64: + *v = nil + + case *map[uintptr]uintptr: + *v = nil + + case *map[uintptr]int: + *v = nil + + case *map[uintptr]int8: + *v = nil + + case *map[uintptr]int16: + *v = nil + + case *map[uintptr]int32: + *v = nil + + case *map[uintptr]int64: + *v = nil + + case *map[uintptr]float32: + *v = nil + + case *map[uintptr]float64: + *v = nil + + case *map[uintptr]bool: + *v = nil + + case *[]int: + *v = nil + + case *map[int]interface{}: + *v = nil + + case *map[int]string: + *v = nil + + case *map[int]uint: + *v = nil + + case *map[int]uint8: + *v = nil + + case *map[int]uint16: + *v = nil + + case *map[int]uint32: + *v = nil + + case *map[int]uint64: + *v = nil + + case *map[int]uintptr: + *v = nil + + case *map[int]int: + *v = nil + + case *map[int]int8: + *v = nil + + case *map[int]int16: + *v = nil + + case *map[int]int32: + *v = nil + + case *map[int]int64: + *v = nil + + case *map[int]float32: + *v = nil + + case *map[int]float64: + *v = nil + + case *map[int]bool: + *v = nil + + case *[]int8: + *v = nil + + case *map[int8]interface{}: + *v = nil + + case *map[int8]string: + *v = nil + + case *map[int8]uint: + *v = nil + + case *map[int8]uint8: + *v = nil + + case *map[int8]uint16: + *v = nil + + case *map[int8]uint32: + *v = nil + + case *map[int8]uint64: + *v = nil + + case *map[int8]uintptr: + *v = nil + + case *map[int8]int: + *v = nil + + case *map[int8]int8: + *v = nil + + case *map[int8]int16: + *v = nil + + case *map[int8]int32: + *v = nil + + case *map[int8]int64: + *v = nil + + case *map[int8]float32: + *v = nil + + case *map[int8]float64: + *v = nil + + case *map[int8]bool: + *v = nil + + case *[]int16: + *v = nil + + case *map[int16]interface{}: + *v = nil + + case *map[int16]string: + *v = nil + + case *map[int16]uint: + *v = nil + + case *map[int16]uint8: + *v = nil + + case *map[int16]uint16: + *v = nil + + case *map[int16]uint32: + *v = nil + + case *map[int16]uint64: + *v = nil + + case *map[int16]uintptr: + *v = nil + + case *map[int16]int: + *v = nil + + case *map[int16]int8: + *v = nil + + case *map[int16]int16: + *v 
= nil + + case *map[int16]int32: + *v = nil + + case *map[int16]int64: + *v = nil + + case *map[int16]float32: + *v = nil + + case *map[int16]float64: + *v = nil + + case *map[int16]bool: + *v = nil + + case *[]int32: + *v = nil + + case *map[int32]interface{}: + *v = nil + + case *map[int32]string: + *v = nil + + case *map[int32]uint: + *v = nil + + case *map[int32]uint8: + *v = nil + + case *map[int32]uint16: + *v = nil + + case *map[int32]uint32: + *v = nil + + case *map[int32]uint64: + *v = nil + + case *map[int32]uintptr: + *v = nil + + case *map[int32]int: + *v = nil + + case *map[int32]int8: + *v = nil + + case *map[int32]int16: + *v = nil + + case *map[int32]int32: + *v = nil + + case *map[int32]int64: + *v = nil + + case *map[int32]float32: + *v = nil + + case *map[int32]float64: + *v = nil + + case *map[int32]bool: + *v = nil + + case *[]int64: + *v = nil + + case *map[int64]interface{}: + *v = nil + + case *map[int64]string: + *v = nil + + case *map[int64]uint: + *v = nil + + case *map[int64]uint8: + *v = nil + + case *map[int64]uint16: + *v = nil + + case *map[int64]uint32: + *v = nil + + case *map[int64]uint64: + *v = nil + + case *map[int64]uintptr: + *v = nil + + case *map[int64]int: + *v = nil + + case *map[int64]int8: + *v = nil + + case *map[int64]int16: + *v = nil + + case *map[int64]int32: + *v = nil + + case *map[int64]int64: + *v = nil + + case *map[int64]float32: + *v = nil + + case *map[int64]float64: + *v = nil + + case *map[int64]bool: + *v = nil + + case *[]bool: + *v = nil + + case *map[bool]interface{}: + *v = nil + + case *map[bool]string: + *v = nil + + case *map[bool]uint: + *v = nil + + case *map[bool]uint8: + *v = nil + + case *map[bool]uint16: + *v = nil + + case *map[bool]uint32: + *v = nil + + case *map[bool]uint64: + *v = nil + + case *map[bool]uintptr: + *v = nil + + case *map[bool]int: + *v = nil + + case *map[bool]int8: + *v = nil + + case *map[bool]int16: + *v = nil + + case *map[bool]int32: + *v = nil + + case *map[bool]int64: + *v = nil + + case *map[bool]float32: + *v = nil + + case *map[bool]float64: + *v = nil + + case *map[bool]bool: + *v = nil + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions + +func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]interface{}) + if v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceIntfV(rv2i(rv).([]interface{}), !array, d) + } +} + +func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) { + if v, changed := f.DecSliceIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []interface{}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]interface{}, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < 
containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + } else { + xlen = 8 + } + v = make([]interface{}, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, nil) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + d.decode(&v[j]) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]interface{}, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]string) + if v, changed := fastpathTV.DecSliceStringV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceStringV(rv2i(rv).([]string), !array, d) + } +} + +func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) { + if v, changed := f.DecSliceStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []string{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]string, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + } else { + xlen = 8 + } + v = make([]string, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, "") + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeString() + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]string, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]float32) + if v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceFloat32V(rv2i(rv).([]float32), !array, d) + } +} + +func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) { + if v, changed := f.DecSliceFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + 
slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float32, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]float32, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = float32(dd.DecodeFloat(true)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]float32, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]float64) + if v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceFloat64V(rv2i(rv).([]float64), !array, d) + } +} + +func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) { + if v, changed := f.DecSliceFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float64, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]float64, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeFloat(false) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]float64, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint) + if v, changed := fastpathTV.DecSliceUintV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceUintV(rv2i(rv).([]uint), !array, d) + } +} + +func (f fastpathT) DecSliceUintX(vp 
*[]uint, d *Decoder) { + if v, changed := f.DecSliceUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uint, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uint(dd.DecodeUint(uintBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint16) + if v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceUint16V(rv2i(rv).([]uint16), !array, d) + } +} + +func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) { + if v, changed := f.DecSliceUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ []uint16, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint16, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + } else { + xlen = 8 + } + v = make([]uint16, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uint16(dd.DecodeUint(16)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint16, 0) + changed = true + } + } + slh.End() + return v, changed +} + 
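(Editor's aside, not part of the vendored file: the generated slice decoders above all follow one template — open the sequence with decSliceHelperStart, pre-size via decInferLen when the container length is known, then decode element-by-element, appending or swallowing when the target cannot grow. Callers never invoke these directly; the Decoder dispatches to them when the destination is one of the concrete fast-path types. A minimal sketch of how that dispatch is reached through the public codec API, assuming only the exported Handle/Encoder/Decoder types; decoding into a concrete []uint16 lets the generated fastpathDecSliceUint16R run instead of the generic reflection path.)

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	// Any Handle works; CBOR is used here purely for the example.
	var h codec.CborHandle

	// Encode a small slice so there are bytes to decode.
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode([]uint16{1, 2, 3}); err != nil {
		panic(err)
	}

	// Decoding into *[]uint16 (a fast-path type) is handled by the
	// generated DecSliceUint16V code rather than generic reflection.
	var out []uint16
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // [1 2 3]
}

(End of aside; the vendored generated code continues below.)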
+func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint32) + if v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceUint32V(rv2i(rv).([]uint32), !array, d) + } +} + +func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) { + if v, changed := f.DecSliceUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint32, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]uint32, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uint32(dd.DecodeUint(32)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint32, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint64) + if v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceUint64V(rv2i(rv).([]uint64), !array, d) + } +} + +func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) { + if v, changed := f.DecSliceUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint64, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uint64, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 
0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeUint(64) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint64, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uintptr) + if v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceUintptrV(rv2i(rv).([]uintptr), !array, d) + } +} + +func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) { + if v, changed := f.DecSliceUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uintptr{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uintptr, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uintptr, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uintptr, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int) + if v, changed := fastpathTV.DecSliceIntV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceIntV(rv2i(rv).([]int), !array, d) + } +} + +func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) { + if v, changed := f.DecSliceIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; 
(hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]int, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int(dd.DecodeInt(intBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt8R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int8) + if v, changed := fastpathTV.DecSliceInt8V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceInt8V(rv2i(rv).([]int8), !array, d) + } +} + +func (f fastpathT) DecSliceInt8X(vp *[]int8, d *Decoder) { + if v, changed := f.DecSliceInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int8, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + } else { + xlen = 8 + } + v = make([]int8, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int8(dd.DecodeInt(8)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int8, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt16R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int16) + if v, changed := fastpathTV.DecSliceInt16V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceInt16V(rv2i(rv).([]int16), !array, d) + } +} + +func (f fastpathT) DecSliceInt16X(vp *[]int16, d *Decoder) { + if v, changed := f.DecSliceInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []int16, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + 
hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int16, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + } else { + xlen = 8 + } + v = make([]int16, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int16(dd.DecodeInt(16)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int16, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int32) + if v, changed := fastpathTV.DecSliceInt32V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceInt32V(rv2i(rv).([]int32), !array, d) + } +} + +func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) { + if v, changed := f.DecSliceInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []int32, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int32, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]int32, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int32(dd.DecodeInt(32)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int32, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int64) + if v, changed := fastpathTV.DecSliceInt64V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceInt64V(rv2i(rv).([]int64), !array, d) + } +} + +func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) { + if v, changed := f.DecSliceInt64V(*vp, true, d); 
changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []int64, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int64, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]int64, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeInt(64) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int64, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]bool) + if v, changed := fastpathTV.DecSliceBoolV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceBoolV(rv2i(rv).([]bool), !array, d) + } +} + +func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) { + if v, changed := f.DecSliceBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []bool{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]bool, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + } else { + xlen = 8 + } + v = make([]bool, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, false) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeBool() + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]bool, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := 
rv2i(rv).(*map[interface{}]interface{}) + if v, changed := fastpathTV.DecMapIntfIntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), false, d) +} +func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, d *Decoder) { + if v, changed := f.DecMapIntfIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool, + d *Decoder) (_ map[interface{}]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk interface{} + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]string) + if v, changed := fastpathTV.DecMapIntfStringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfStringV(rv2i(rv).(map[interface{}]string), false, d) +} +func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, d *Decoder) { + if v, changed := f.DecMapIntfStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool, + d *Decoder) (_ map[interface{}]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint) + if v, changed := fastpathTV.DecMapIntfUintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUintV(rv2i(rv).(map[interface{}]uint), false, d) +} +func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, d *Decoder) { + if v, changed := f.DecMapIntfUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool, + d *Decoder) (_ map[interface{}]uint, changed bool) { + dd, esep 
:= d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint8) + if v, changed := fastpathTV.DecMapIntfUint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), false, d) +} +func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, d *Decoder) { + if v, changed := f.DecMapIntfUint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool, + d *Decoder) (_ map[interface{}]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint16) + if v, changed := fastpathTV.DecMapIntfUint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), false, d) +} +func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, d *Decoder) { + if v, changed := f.DecMapIntfUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool, + d *Decoder) (_ map[interface{}]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if 
dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint32) + if v, changed := fastpathTV.DecMapIntfUint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), false, d) +} +func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, d *Decoder) { + if v, changed := f.DecMapIntfUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool, + d *Decoder) (_ map[interface{}]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint64) + if v, changed := fastpathTV.DecMapIntfUint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), false, d) +} +func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, d *Decoder) { + if v, changed := f.DecMapIntfUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool, + d *Decoder) (_ map[interface{}]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uintptr) + if v, changed := fastpathTV.DecMapIntfUintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), false, d) +} +func (f fastpathT) DecMapIntfUintptrX(vp 
*map[interface{}]uintptr, d *Decoder) { + if v, changed := f.DecMapIntfUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool, + d *Decoder) (_ map[interface{}]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int) + if v, changed := fastpathTV.DecMapIntfIntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfIntV(rv2i(rv).(map[interface{}]int), false, d) +} +func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, d *Decoder) { + if v, changed := f.DecMapIntfIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool, + d *Decoder) (_ map[interface{}]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int8) + if v, changed := fastpathTV.DecMapIntfInt8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfInt8V(rv2i(rv).(map[interface{}]int8), false, d) +} +func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, d *Decoder) { + if v, changed := f.DecMapIntfInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool, + d *Decoder) (_ map[interface{}]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < 
containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int16) + if v, changed := fastpathTV.DecMapIntfInt16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfInt16V(rv2i(rv).(map[interface{}]int16), false, d) +} +func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, d *Decoder) { + if v, changed := f.DecMapIntfInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool, + d *Decoder) (_ map[interface{}]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int32) + if v, changed := fastpathTV.DecMapIntfInt32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfInt32V(rv2i(rv).(map[interface{}]int32), false, d) +} +func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, d *Decoder) { + if v, changed := f.DecMapIntfInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool, + d *Decoder) (_ map[interface{}]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int64) + if v, changed := 
fastpathTV.DecMapIntfInt64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfInt64V(rv2i(rv).(map[interface{}]int64), false, d) +} +func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, d *Decoder) { + if v, changed := f.DecMapIntfInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool, + d *Decoder) (_ map[interface{}]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]float32) + if v, changed := fastpathTV.DecMapIntfFloat32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), false, d) +} +func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, d *Decoder) { + if v, changed := f.DecMapIntfFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool, + d *Decoder) (_ map[interface{}]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]float64) + if v, changed := fastpathTV.DecMapIntfFloat64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), false, d) +} +func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, d *Decoder) { + if v, changed := f.DecMapIntfFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool, + d *Decoder) (_ map[interface{}]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]bool) + if v, changed := fastpathTV.DecMapIntfBoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfBoolV(rv2i(rv).(map[interface{}]bool), false, d) +} +func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, d *Decoder) { + if v, changed := f.DecMapIntfBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool, + d *Decoder) (_ map[interface{}]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]interface{}) + if v, changed := fastpathTV.DecMapStringIntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringIntfV(rv2i(rv).(map[string]interface{}), false, d) +} +func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) { + if v, changed := f.DecMapStringIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool, + d *Decoder) (_ map[string]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk string + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = 
nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]string) + if v, changed := fastpathTV.DecMapStringStringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringStringV(rv2i(rv).(map[string]string), false, d) +} +func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) { + if v, changed := f.DecMapStringStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool, + d *Decoder) (_ map[string]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint) + if v, changed := fastpathTV.DecMapStringUintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUintV(rv2i(rv).(map[string]uint), false, d) +} +func (f fastpathT) DecMapStringUintX(vp *map[string]uint, d *Decoder) { + if v, changed := f.DecMapStringUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool, + d *Decoder) (_ map[string]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint8) + if v, changed := fastpathTV.DecMapStringUint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUint8V(rv2i(rv).(map[string]uint8), false, d) +} +func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) { + if v, changed := f.DecMapStringUint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool, + d *Decoder) (_ map[string]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if 
canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint16) + if v, changed := fastpathTV.DecMapStringUint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUint16V(rv2i(rv).(map[string]uint16), false, d) +} +func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, d *Decoder) { + if v, changed := f.DecMapStringUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool, + d *Decoder) (_ map[string]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint32) + if v, changed := fastpathTV.DecMapStringUint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUint32V(rv2i(rv).(map[string]uint32), false, d) +} +func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, d *Decoder) { + if v, changed := f.DecMapStringUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool, + d *Decoder) (_ map[string]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == 
reflect.Ptr { + vp := rv2i(rv).(*map[string]uint64) + if v, changed := fastpathTV.DecMapStringUint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUint64V(rv2i(rv).(map[string]uint64), false, d) +} +func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, d *Decoder) { + if v, changed := f.DecMapStringUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool, + d *Decoder) (_ map[string]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uintptr) + if v, changed := fastpathTV.DecMapStringUintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUintptrV(rv2i(rv).(map[string]uintptr), false, d) +} +func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, d *Decoder) { + if v, changed := f.DecMapStringUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool, + d *Decoder) (_ map[string]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int) + if v, changed := fastpathTV.DecMapStringIntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringIntV(rv2i(rv).(map[string]int), false, d) +} +func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) { + if v, changed := f.DecMapStringIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool, + d *Decoder) (_ map[string]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, 
changed + } + + var mk string + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int8) + if v, changed := fastpathTV.DecMapStringInt8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringInt8V(rv2i(rv).(map[string]int8), false, d) +} +func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, d *Decoder) { + if v, changed := f.DecMapStringInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool, + d *Decoder) (_ map[string]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int16) + if v, changed := fastpathTV.DecMapStringInt16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringInt16V(rv2i(rv).(map[string]int16), false, d) +} +func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, d *Decoder) { + if v, changed := f.DecMapStringInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool, + d *Decoder) (_ map[string]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int32) + if v, changed := fastpathTV.DecMapStringInt32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringInt32V(rv2i(rv).(map[string]int32), false, d) +} +func 
(f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) { + if v, changed := f.DecMapStringInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool, + d *Decoder) (_ map[string]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int64) + if v, changed := fastpathTV.DecMapStringInt64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringInt64V(rv2i(rv).(map[string]int64), false, d) +} +func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, d *Decoder) { + if v, changed := f.DecMapStringInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool, + d *Decoder) (_ map[string]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]float32) + if v, changed := fastpathTV.DecMapStringFloat32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringFloat32V(rv2i(rv).(map[string]float32), false, d) +} +func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, d *Decoder) { + if v, changed := f.DecMapStringFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool, + d *Decoder) (_ map[string]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + 
dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]float64) + if v, changed := fastpathTV.DecMapStringFloat64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringFloat64V(rv2i(rv).(map[string]float64), false, d) +} +func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) { + if v, changed := f.DecMapStringFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool, + d *Decoder) (_ map[string]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]bool) + if v, changed := fastpathTV.DecMapStringBoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringBoolV(rv2i(rv).(map[string]bool), false, d) +} +func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) { + if v, changed := f.DecMapStringBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool, + d *Decoder) (_ map[string]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]interface{}) + if v, changed := fastpathTV.DecMapFloat32IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), false, d) +} +func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, d *Decoder) { + if v, changed := f.DecMapFloat32IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) 
DecMapFloat32IntfV(v map[float32]interface{}, canChange bool, + d *Decoder) (_ map[float32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk float32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]string) + if v, changed := fastpathTV.DecMapFloat32StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32StringV(rv2i(rv).(map[float32]string), false, d) +} +func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, d *Decoder) { + if v, changed := f.DecMapFloat32StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool, + d *Decoder) (_ map[float32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint) + if v, changed := fastpathTV.DecMapFloat32UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32UintV(rv2i(rv).(map[float32]uint), false, d) +} +func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, d *Decoder) { + if v, changed := f.DecMapFloat32UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool, + d *Decoder) (_ map[float32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + 
dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint8) + if v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), false, d) +} +func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, d *Decoder) { + if v, changed := f.DecMapFloat32Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool, + d *Decoder) (_ map[float32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint16) + if v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d) +} +func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) { + if v, changed := f.DecMapFloat32Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool, + d *Decoder) (_ map[float32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint32) + if v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d) +} +func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) { + if v, changed := f.DecMapFloat32Uint32V(*vp, true, d); changed { + 
*vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool, + d *Decoder) (_ map[float32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint64) + if v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d) +} +func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) { + if v, changed := f.DecMapFloat32Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool, + d *Decoder) (_ map[float32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uintptr) + if v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, d) +} +func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) { + if v, changed := f.DecMapFloat32UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool, + d *Decoder) (_ map[float32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() 
{ + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int) + if v, changed := fastpathTV.DecMapFloat32IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d) +} +func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) { + if v, changed := f.DecMapFloat32IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool, + d *Decoder) (_ map[float32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int8) + if v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d) +} +func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) { + if v, changed := f.DecMapFloat32Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool, + d *Decoder) (_ map[float32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int16) + if v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d) +} +func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, d *Decoder) { + if v, changed := f.DecMapFloat32Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool, + d 
*Decoder) (_ map[float32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int32) + if v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d) +} +func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) { + if v, changed := f.DecMapFloat32Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool, + d *Decoder) (_ map[float32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int64) + if v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d) +} +func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) { + if v, changed := f.DecMapFloat32Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool, + d *Decoder) (_ map[float32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + 
v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float32) + if v, changed := fastpathTV.DecMapFloat32Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d) +} +func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, d *Decoder) { + if v, changed := f.DecMapFloat32Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool, + d *Decoder) (_ map[float32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float64) + if v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d) +} +func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) { + if v, changed := f.DecMapFloat32Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool, + d *Decoder) (_ map[float32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]bool) + if v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d) +} +func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) { + if v, changed := f.DecMapFloat32BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool, + d *Decoder) (_ map[float32]bool, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]interface{}) + if v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d) +} +func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) { + if v, changed := f.DecMapFloat64IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool, + d *Decoder) (_ map[float64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk float64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]string) + if v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d) +} +func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) { + if v, changed := f.DecMapFloat64StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool, + d *Decoder) (_ map[float64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + 
continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint) + if v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d) +} +func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) { + if v, changed := f.DecMapFloat64UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool, + d *Decoder) (_ map[float64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint8) + if v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d) +} +func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) { + if v, changed := f.DecMapFloat64Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool, + d *Decoder) (_ map[float64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint16) + if v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d) +} +func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) { + if v, changed := f.DecMapFloat64Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool, + d *Decoder) (_ map[float64]uint16, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint32) + if v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d) +} +func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) { + if v, changed := f.DecMapFloat64Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool, + d *Decoder) (_ map[float64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint64) + if v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d) +} +func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) { + if v, changed := f.DecMapFloat64Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool, + d *Decoder) (_ map[float64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} 
+ +func (d *Decoder) fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uintptr) + if v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d) +} +func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) { + if v, changed := f.DecMapFloat64UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool, + d *Decoder) (_ map[float64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int) + if v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d) +} +func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) { + if v, changed := f.DecMapFloat64IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool, + d *Decoder) (_ map[float64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int8) + if v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d) +} +func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) { + if v, changed := f.DecMapFloat64Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool, + d *Decoder) (_ map[float64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int16) + if v, changed := fastpathTV.DecMapFloat64Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d) +} +func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) { + if v, changed := f.DecMapFloat64Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool, + d *Decoder) (_ map[float64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int32) + if v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d) +} +func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) { + if v, changed := f.DecMapFloat64Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool, + d *Decoder) (_ map[float64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := 
rv2i(rv).(*map[float64]int64) + if v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d) +} +func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) { + if v, changed := f.DecMapFloat64Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool, + d *Decoder) (_ map[float64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float32) + if v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d) +} +func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) { + if v, changed := f.DecMapFloat64Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool, + d *Decoder) (_ map[float64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float64) + if v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d) +} +func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) { + if v, changed := f.DecMapFloat64Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool, + d *Decoder) (_ map[float64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]float64, xlen) + changed = true + } + if 
containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]bool) + if v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d) +} +func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) { + if v, changed := f.DecMapFloat64BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool, + d *Decoder) (_ map[float64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]interface{}) + if v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d) +} +func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) { + if v, changed := f.DecMapUintIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool, + d *Decoder) (_ map[uint]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]string) + 
if v, changed := fastpathTV.DecMapUintStringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d) +} +func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) { + if v, changed := f.DecMapUintStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool, + d *Decoder) (_ map[uint]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint) + if v, changed := fastpathTV.DecMapUintUintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, d) +} +func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, d *Decoder) { + if v, changed := f.DecMapUintUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool, + d *Decoder) (_ map[uint]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint8) + if v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d) +} +func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) { + if v, changed := f.DecMapUintUint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool, + d *Decoder) (_ map[uint]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) 
|| !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint16) + if v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d) +} +func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) { + if v, changed := f.DecMapUintUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool, + d *Decoder) (_ map[uint]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint32) + if v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d) +} +func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) { + if v, changed := f.DecMapUintUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool, + d *Decoder) (_ map[uint]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint64) + if v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d) +} +func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) { + if v, changed := 
f.DecMapUintUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool, + d *Decoder) (_ map[uint]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uintptr) + if v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d) +} +func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) { + if v, changed := f.DecMapUintUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool, + d *Decoder) (_ map[uint]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int) + if v, changed := fastpathTV.DecMapUintIntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d) +} +func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) { + if v, changed := f.DecMapUintIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool, + d *Decoder) (_ map[uint]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + 
v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int8) + if v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d) +} +func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) { + if v, changed := f.DecMapUintInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool, + d *Decoder) (_ map[uint]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int16) + if v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d) +} +func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) { + if v, changed := f.DecMapUintInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool, + d *Decoder) (_ map[uint]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int32) + if v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d) +} +func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) { + if v, changed := f.DecMapUintInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool, + d *Decoder) (_ map[uint]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + 
xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int64) + if v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d) +} +func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) { + if v, changed := f.DecMapUintInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool, + d *Decoder) (_ map[uint]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]float32) + if v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d) +} +func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) { + if v, changed := f.DecMapUintFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool, + d *Decoder) (_ map[uint]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := 
rv2i(rv).(*map[uint]float64) + if v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d) +} +func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) { + if v, changed := f.DecMapUintFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool, + d *Decoder) (_ map[uint]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]bool) + if v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d) +} +func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) { + if v, changed := f.DecMapUintBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool, + d *Decoder) (_ map[uint]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]interface{}) + if v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d) +} +func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) { + if v, changed := f.DecMapUint8IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool, + d *Decoder) (_ map[uint8]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := 
!d.h.MapValueReset && !d.h.InterfaceReset + var mk uint8 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]string) + if v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d) +} +func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) { + if v, changed := f.DecMapUint8StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool, + d *Decoder) (_ map[uint8]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint) + if v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d) +} +func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) { + if v, changed := f.DecMapUint8UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool, + d *Decoder) (_ map[uint8]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint8) + if v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + 
fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d) +} +func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) { + if v, changed := f.DecMapUint8Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool, + d *Decoder) (_ map[uint8]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint16) + if v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d) +} +func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) { + if v, changed := f.DecMapUint8Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool, + d *Decoder) (_ map[uint8]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint32) + if v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d) +} +func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d *Decoder) { + if v, changed := f.DecMapUint8Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool, + d *Decoder) (_ map[uint8]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + 
dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint64) + if v, changed := fastpathTV.DecMapUint8Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), false, d) +} +func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) { + if v, changed := f.DecMapUint8Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool, + d *Decoder) (_ map[uint8]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uintptr) + if v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d) +} +func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) { + if v, changed := f.DecMapUint8UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool, + d *Decoder) (_ map[uint8]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int) + if v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d) +} +func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) { + if v, changed := f.DecMapUint8IntV(*vp, true, d); changed { + *vp = v + } 
+} +func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool, + d *Decoder) (_ map[uint8]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int8) + if v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d) +} +func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) { + if v, changed := f.DecMapUint8Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool, + d *Decoder) (_ map[uint8]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int16) + if v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d) +} +func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) { + if v, changed := f.DecMapUint8Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool, + d *Decoder) (_ map[uint8]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + 
} + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int32) + if v, changed := fastpathTV.DecMapUint8Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d) +} +func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) { + if v, changed := f.DecMapUint8Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool, + d *Decoder) (_ map[uint8]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int64) + if v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d) +} +func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) { + if v, changed := f.DecMapUint8Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool, + d *Decoder) (_ map[uint8]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]float32) + if v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d) +} +func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) { + if v, changed := f.DecMapUint8Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool, + d *Decoder) (_ map[uint8]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 
5) + v = make(map[uint8]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]float64) + if v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d) +} +func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) { + if v, changed := f.DecMapUint8Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool, + d *Decoder) (_ map[uint8]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]bool) + if v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d) +} +func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) { + if v, changed := f.DecMapUint8BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool, + d *Decoder) (_ map[uint8]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]interface{}) + if v, changed := 
fastpathTV.DecMapUint16IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d) +} +func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) { + if v, changed := f.DecMapUint16IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool, + d *Decoder) (_ map[uint16]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint16 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]string) + if v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d) +} +func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) { + if v, changed := f.DecMapUint16StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool, + d *Decoder) (_ map[uint16]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint) + if v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d) +} +func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) { + if v, changed := f.DecMapUint16UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool, + d *Decoder) (_ map[uint16]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uint, xlen) + changed = true + } + if 
containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint8) + if v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d) +} +func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) { + if v, changed := f.DecMapUint16Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool, + d *Decoder) (_ map[uint16]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint16) + if v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d) +} +func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d *Decoder) { + if v, changed := f.DecMapUint16Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool, + d *Decoder) (_ map[uint16]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint32) + if v, changed := fastpathTV.DecMapUint16Uint32V(*vp, true, d); changed { 
+ *vp = v + } + return + } + fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d) +} +func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) { + if v, changed := f.DecMapUint16Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool, + d *Decoder) (_ map[uint16]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint64) + if v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d) +} +func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) { + if v, changed := f.DecMapUint16Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool, + d *Decoder) (_ map[uint16]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uintptr) + if v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d) +} +func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) { + if v, changed := f.DecMapUint16UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool, + d *Decoder) (_ map[uint16]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv uintptr + hasLen := containerLen > 0 + for j := 
0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int) + if v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d) +} +func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) { + if v, changed := f.DecMapUint16IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool, + d *Decoder) (_ map[uint16]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int8) + if v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d) +} +func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) { + if v, changed := f.DecMapUint16Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool, + d *Decoder) (_ map[uint16]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int16) + if v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d) +} +func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) { + if 
v, changed := f.DecMapUint16Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool, + d *Decoder) (_ map[uint16]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int32) + if v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d) +} +func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) { + if v, changed := f.DecMapUint16Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool, + d *Decoder) (_ map[uint16]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int64) + if v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d) +} +func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) { + if v, changed := f.DecMapUint16Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool, + d *Decoder) (_ map[uint16]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]float32) + if v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d) +} +func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) { + if v, changed := f.DecMapUint16Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool, + d *Decoder) (_ map[uint16]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]float64) + if v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d) +} +func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) { + if v, changed := f.DecMapUint16Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool, + d *Decoder) (_ map[uint16]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]bool) + if v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d) +} +func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) { + if v, changed := f.DecMapUint16BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool, + 
d *Decoder) (_ map[uint16]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint16 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]interface{}) + if v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d) +} +func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) { + if v, changed := f.DecMapUint32IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool, + d *Decoder) (_ map[uint32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]string) + if v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d) +} +func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, d *Decoder) { + if v, changed := f.DecMapUint32StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool, + d *Decoder) (_ map[uint32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + 
delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint) + if v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d) +} +func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) { + if v, changed := f.DecMapUint32UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool, + d *Decoder) (_ map[uint32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint8) + if v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d) +} +func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) { + if v, changed := f.DecMapUint32Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool, + d *Decoder) (_ map[uint32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint16) + if v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d) +} +func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) { + if v, changed := f.DecMapUint32Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool, + d *Decoder) (_ map[uint32]uint16, changed bool) { + dd, esep := 
d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint32) + if v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d) +} +func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) { + if v, changed := f.DecMapUint32Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool, + d *Decoder) (_ map[uint32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint64) + if v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d) +} +func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) { + if v, changed := f.DecMapUint32Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool, + d *Decoder) (_ map[uint32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d 
*Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uintptr) + if v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d) +} +func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) { + if v, changed := f.DecMapUint32UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool, + d *Decoder) (_ map[uint32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int) + if v, changed := fastpathTV.DecMapUint32IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d) +} +func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) { + if v, changed := f.DecMapUint32IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool, + d *Decoder) (_ map[uint32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int8) + if v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d) +} +func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) { + if v, changed := f.DecMapUint32Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool, + d *Decoder) (_ map[uint32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = 
make(map[uint32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int16) + if v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d) +} +func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) { + if v, changed := f.DecMapUint32Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool, + d *Decoder) (_ map[uint32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int32) + if v, changed := fastpathTV.DecMapUint32Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d) +} +func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) { + if v, changed := f.DecMapUint32Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool, + d *Decoder) (_ map[uint32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int64) + if v, changed := 
fastpathTV.DecMapUint32Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d) +} +func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) { + if v, changed := f.DecMapUint32Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool, + d *Decoder) (_ map[uint32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]float32) + if v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d) +} +func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) { + if v, changed := f.DecMapUint32Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool, + d *Decoder) (_ map[uint32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]float64) + if v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d) +} +func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) { + if v, changed := f.DecMapUint32Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool, + d *Decoder) (_ map[uint32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk 
uint32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]bool) + if v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d) +} +func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) { + if v, changed := f.DecMapUint32BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool, + d *Decoder) (_ map[uint32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]interface{}) + if v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d) +} +func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) { + if v, changed := f.DecMapUint64IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool, + d *Decoder) (_ map[uint64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]string) + if v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d); 
changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d) +} +func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) { + if v, changed := f.DecMapUint64StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool, + d *Decoder) (_ map[uint64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint) + if v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d) +} +func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) { + if v, changed := f.DecMapUint64UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool, + d *Decoder) (_ map[uint64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint8) + if v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d) +} +func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) { + if v, changed := f.DecMapUint64Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool, + d *Decoder) (_ map[uint64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint16) + if v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), false, d) +} +func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) { + if v, changed := f.DecMapUint64Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool, + d *Decoder) (_ map[uint64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint32) + if v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d) +} +func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) { + if v, changed := f.DecMapUint64Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool, + d *Decoder) (_ map[uint64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint64) + if v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d) +} +func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) { + if v, changed 
:= f.DecMapUint64Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool, + d *Decoder) (_ map[uint64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uintptr) + if v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d) +} +func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) { + if v, changed := f.DecMapUint64UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool, + d *Decoder) (_ map[uint64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int) + if v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d) +} +func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) { + if v, changed := f.DecMapUint64IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool, + d *Decoder) (_ map[uint64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + 
delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int8) + if v, changed := fastpathTV.DecMapUint64Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d) +} +func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) { + if v, changed := f.DecMapUint64Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool, + d *Decoder) (_ map[uint64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int16) + if v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d) +} +func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) { + if v, changed := f.DecMapUint64Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool, + d *Decoder) (_ map[uint64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int32) + if v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d) +} +func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) { + if v, changed := f.DecMapUint64Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool, + d *Decoder) (_ map[uint64]int32, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int64) + if v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d) +} +func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) { + if v, changed := f.DecMapUint64Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool, + d *Decoder) (_ map[uint64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]float32) + if v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d) +} +func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) { + if v, changed := f.DecMapUint64Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool, + d *Decoder) (_ map[uint64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]float64) + if v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d) +} +func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) { + if v, changed := f.DecMapUint64Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool, + d *Decoder) (_ map[uint64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]bool) + if v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d) +} +func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) { + if v, changed := f.DecMapUint64BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool, + d *Decoder) (_ map[uint64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]interface{}) + if v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d) +} +func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) { + if v, changed := f.DecMapUintptrIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool, + d *Decoder) (_ map[uintptr]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = 
make(map[uintptr]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uintptr + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]string) + if v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d) +} +func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) { + if v, changed := f.DecMapUintptrStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool, + d *Decoder) (_ map[uintptr]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint) + if v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d) +} +func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) { + if v, changed := f.DecMapUintptrUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool, + d *Decoder) (_ map[uintptr]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint8) + if v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d) +} +func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) { + if v, changed := f.DecMapUintptrUint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool, + d *Decoder) (_ map[uintptr]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint16) + if v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d) +} +func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) { + if v, changed := f.DecMapUintptrUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool, + d *Decoder) (_ map[uintptr]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint32) + if v, changed := fastpathTV.DecMapUintptrUint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), false, d) +} +func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, d *Decoder) { + if v, changed := f.DecMapUintptrUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool, + d *Decoder) (_ map[uintptr]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint64) + if v, changed := fastpathTV.DecMapUintptrUint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), false, d) +} +func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, d *Decoder) { + if v, changed := f.DecMapUintptrUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool, + d *Decoder) (_ map[uintptr]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uintptr) + if v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), false, d) +} +func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, d *Decoder) { + if v, changed := f.DecMapUintptrUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool, + d *Decoder) (_ map[uintptr]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int) + if v, changed := fastpathTV.DecMapUintptrIntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrIntV(rv2i(rv).(map[uintptr]int), false, d) +} +func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, d *Decoder) { + if v, changed := f.DecMapUintptrIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool, + d *Decoder) (_ map[uintptr]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int8) + if v, changed := fastpathTV.DecMapUintptrInt8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), false, d) +} +func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, d *Decoder) { + if v, changed := f.DecMapUintptrInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool, + d *Decoder) (_ map[uintptr]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int16) + if v, changed := fastpathTV.DecMapUintptrInt16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), false, d) +} +func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, d *Decoder) { + if v, changed := f.DecMapUintptrInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool, + d *Decoder) (_ map[uintptr]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = 
make(map[uintptr]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int32) + if v, changed := fastpathTV.DecMapUintptrInt32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), false, d) +} +func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, d *Decoder) { + if v, changed := f.DecMapUintptrInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool, + d *Decoder) (_ map[uintptr]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int64) + if v, changed := fastpathTV.DecMapUintptrInt64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), false, d) +} +func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, d *Decoder) { + if v, changed := f.DecMapUintptrInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool, + d *Decoder) (_ map[uintptr]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := 
rv2i(rv).(*map[uintptr]float32) + if v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), false, d) +} +func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, d *Decoder) { + if v, changed := f.DecMapUintptrFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool, + d *Decoder) (_ map[uintptr]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]float64) + if v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), false, d) +} +func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, d *Decoder) { + if v, changed := f.DecMapUintptrFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool, + d *Decoder) (_ map[uintptr]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]bool) + if v, changed := fastpathTV.DecMapUintptrBoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), false, d) +} +func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, d *Decoder) { + if v, changed := f.DecMapUintptrBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool, + d *Decoder) (_ map[uintptr]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]bool, xlen) + 
changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uintptr + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]interface{}) + if v, changed := fastpathTV.DecMapIntIntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntIntfV(rv2i(rv).(map[int]interface{}), false, d) +} +func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) { + if v, changed := f.DecMapIntIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool, + d *Decoder) (_ map[int]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]string) + if v, changed := fastpathTV.DecMapIntStringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntStringV(rv2i(rv).(map[int]string), false, d) +} +func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) { + if v, changed := f.DecMapIntStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool, + d *Decoder) (_ map[int]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint) + if v, 
changed := fastpathTV.DecMapIntUintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntUintV(rv2i(rv).(map[int]uint), false, d) +} +func (f fastpathT) DecMapIntUintX(vp *map[int]uint, d *Decoder) { + if v, changed := f.DecMapIntUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool, + d *Decoder) (_ map[int]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint8) + if v, changed := fastpathTV.DecMapIntUint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntUint8V(rv2i(rv).(map[int]uint8), false, d) +} +func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) { + if v, changed := f.DecMapIntUint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool, + d *Decoder) (_ map[int]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint16) + if v, changed := fastpathTV.DecMapIntUint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntUint16V(rv2i(rv).(map[int]uint16), false, d) +} +func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, d *Decoder) { + if v, changed := f.DecMapIntUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool, + d *Decoder) (_ map[int]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + 
if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint32) + if v, changed := fastpathTV.DecMapIntUint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntUint32V(rv2i(rv).(map[int]uint32), false, d) +} +func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, d *Decoder) { + if v, changed := f.DecMapIntUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool, + d *Decoder) (_ map[int]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint64) + if v, changed := fastpathTV.DecMapIntUint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntUint64V(rv2i(rv).(map[int]uint64), false, d) +} +func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) { + if v, changed := f.DecMapIntUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool, + d *Decoder) (_ map[int]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uintptr) + if v, changed := fastpathTV.DecMapIntUintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntUintptrV(rv2i(rv).(map[int]uintptr), false, d) +} +func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, d *Decoder) { + if v, changed := f.DecMapIntUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) 
DecMapIntUintptrV(v map[int]uintptr, canChange bool, + d *Decoder) (_ map[int]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int) + if v, changed := fastpathTV.DecMapIntIntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntIntV(rv2i(rv).(map[int]int), false, d) +} +func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) { + if v, changed := f.DecMapIntIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool, + d *Decoder) (_ map[int]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int8) + if v, changed := fastpathTV.DecMapIntInt8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntInt8V(rv2i(rv).(map[int]int8), false, d) +} +func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, d *Decoder) { + if v, changed := f.DecMapIntInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool, + d *Decoder) (_ map[int]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d 
*Decoder) fastpathDecMapIntInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int16) + if v, changed := fastpathTV.DecMapIntInt16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntInt16V(rv2i(rv).(map[int]int16), false, d) +} +func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, d *Decoder) { + if v, changed := f.DecMapIntInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool, + d *Decoder) (_ map[int]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int32) + if v, changed := fastpathTV.DecMapIntInt32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntInt32V(rv2i(rv).(map[int]int32), false, d) +} +func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) { + if v, changed := f.DecMapIntInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool, + d *Decoder) (_ map[int]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int64) + if v, changed := fastpathTV.DecMapIntInt64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntInt64V(rv2i(rv).(map[int]int64), false, d) +} +func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, d *Decoder) { + if v, changed := f.DecMapIntInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool, + d *Decoder) (_ map[int]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var 
mk int + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]float32) + if v, changed := fastpathTV.DecMapIntFloat32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntFloat32V(rv2i(rv).(map[int]float32), false, d) +} +func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, d *Decoder) { + if v, changed := f.DecMapIntFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool, + d *Decoder) (_ map[int]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]float64) + if v, changed := fastpathTV.DecMapIntFloat64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntFloat64V(rv2i(rv).(map[int]float64), false, d) +} +func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) { + if v, changed := f.DecMapIntFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool, + d *Decoder) (_ map[int]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]bool) + if v, changed := fastpathTV.DecMapIntBoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntBoolV(rv2i(rv).(map[int]bool), false, d) +} +func (f fastpathT) 
DecMapIntBoolX(vp *map[int]bool, d *Decoder) { + if v, changed := f.DecMapIntBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool, + d *Decoder) (_ map[int]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]interface{}) + if v, changed := fastpathTV.DecMapInt8IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8IntfV(rv2i(rv).(map[int8]interface{}), false, d) +} +func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, d *Decoder) { + if v, changed := f.DecMapInt8IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool, + d *Decoder) (_ map[int8]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int8 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]string) + if v, changed := fastpathTV.DecMapInt8StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8StringV(rv2i(rv).(map[int8]string), false, d) +} +func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, d *Decoder) { + if v, changed := f.DecMapInt8StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool, + d *Decoder) (_ map[int8]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk 
= int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint) + if v, changed := fastpathTV.DecMapInt8UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8UintV(rv2i(rv).(map[int8]uint), false, d) +} +func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, d *Decoder) { + if v, changed := f.DecMapInt8UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool, + d *Decoder) (_ map[int8]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint8) + if v, changed := fastpathTV.DecMapInt8Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Uint8V(rv2i(rv).(map[int8]uint8), false, d) +} +func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, d *Decoder) { + if v, changed := f.DecMapInt8Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool, + d *Decoder) (_ map[int8]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint16) + if v, changed := fastpathTV.DecMapInt8Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Uint16V(rv2i(rv).(map[int8]uint16), false, d) +} +func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, d *Decoder) { + if v, changed := f.DecMapInt8Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool, + d *Decoder) (_ 
map[int8]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint32) + if v, changed := fastpathTV.DecMapInt8Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Uint32V(rv2i(rv).(map[int8]uint32), false, d) +} +func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, d *Decoder) { + if v, changed := f.DecMapInt8Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool, + d *Decoder) (_ map[int8]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint64) + if v, changed := fastpathTV.DecMapInt8Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Uint64V(rv2i(rv).(map[int8]uint64), false, d) +} +func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, d *Decoder) { + if v, changed := f.DecMapInt8Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool, + d *Decoder) (_ map[int8]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uintptr) + if v, changed := fastpathTV.DecMapInt8UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), false, d) +} +func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, d *Decoder) { + if v, changed := f.DecMapInt8UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool, + d *Decoder) (_ map[int8]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int) + if v, changed := fastpathTV.DecMapInt8IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8IntV(rv2i(rv).(map[int8]int), false, d) +} +func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, d *Decoder) { + if v, changed := f.DecMapInt8IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool, + d *Decoder) (_ map[int8]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int8) + if v, changed := fastpathTV.DecMapInt8Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Int8V(rv2i(rv).(map[int8]int8), false, d) +} +func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, d *Decoder) { + if v, changed := f.DecMapInt8Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool, + d *Decoder) (_ map[int8]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, 
changed + } + + var mk int8 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int16) + if v, changed := fastpathTV.DecMapInt8Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Int16V(rv2i(rv).(map[int8]int16), false, d) +} +func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, d *Decoder) { + if v, changed := f.DecMapInt8Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool, + d *Decoder) (_ map[int8]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int32) + if v, changed := fastpathTV.DecMapInt8Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Int32V(rv2i(rv).(map[int8]int32), false, d) +} +func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, d *Decoder) { + if v, changed := f.DecMapInt8Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool, + d *Decoder) (_ map[int8]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int64) + if v, changed := fastpathTV.DecMapInt8Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Int64V(rv2i(rv).(map[int8]int64), false, d) +} +func (f fastpathT) DecMapInt8Int64X(vp 
*map[int8]int64, d *Decoder) { + if v, changed := f.DecMapInt8Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool, + d *Decoder) (_ map[int8]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]float32) + if v, changed := fastpathTV.DecMapInt8Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Float32V(rv2i(rv).(map[int8]float32), false, d) +} +func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, d *Decoder) { + if v, changed := f.DecMapInt8Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool, + d *Decoder) (_ map[int8]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]float64) + if v, changed := fastpathTV.DecMapInt8Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Float64V(rv2i(rv).(map[int8]float64), false, d) +} +func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, d *Decoder) { + if v, changed := f.DecMapInt8Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool, + d *Decoder) (_ map[int8]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]bool) + if v, changed := fastpathTV.DecMapInt8BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8BoolV(rv2i(rv).(map[int8]bool), false, d) +} +func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, d *Decoder) { + if v, changed := f.DecMapInt8BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool, + d *Decoder) (_ map[int8]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]interface{}) + if v, changed := fastpathTV.DecMapInt16IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16IntfV(rv2i(rv).(map[int16]interface{}), false, d) +} +func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, d *Decoder) { + if v, changed := f.DecMapInt16IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool, + d *Decoder) (_ map[int16]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int16 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]string) + if v, changed := fastpathTV.DecMapInt16StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16StringV(rv2i(rv).(map[int16]string), false, d) +} +func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, d *Decoder) { + if v, changed := f.DecMapInt16StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16StringV(v 
map[int16]string, canChange bool, + d *Decoder) (_ map[int16]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint) + if v, changed := fastpathTV.DecMapInt16UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16UintV(rv2i(rv).(map[int16]uint), false, d) +} +func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, d *Decoder) { + if v, changed := f.DecMapInt16UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool, + d *Decoder) (_ map[int16]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint8) + if v, changed := fastpathTV.DecMapInt16Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Uint8V(rv2i(rv).(map[int16]uint8), false, d) +} +func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, d *Decoder) { + if v, changed := f.DecMapInt16Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool, + d *Decoder) (_ map[int16]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + 
return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint16) + if v, changed := fastpathTV.DecMapInt16Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Uint16V(rv2i(rv).(map[int16]uint16), false, d) +} +func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, d *Decoder) { + if v, changed := f.DecMapInt16Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool, + d *Decoder) (_ map[int16]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint32) + if v, changed := fastpathTV.DecMapInt16Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Uint32V(rv2i(rv).(map[int16]uint32), false, d) +} +func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, d *Decoder) { + if v, changed := f.DecMapInt16Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool, + d *Decoder) (_ map[int16]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint64) + if v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d) +} +func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) { + if v, changed := f.DecMapInt16Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool, + d *Decoder) (_ map[int16]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 
10) + v = make(map[int16]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uintptr) + if v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d) +} +func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) { + if v, changed := f.DecMapInt16UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool, + d *Decoder) (_ map[int16]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int) + if v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d) +} +func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, d *Decoder) { + if v, changed := f.DecMapInt16IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool, + d *Decoder) (_ map[int16]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int8) + if v, changed := fastpathTV.DecMapInt16Int8V(*vp, 
true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d) +} +func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) { + if v, changed := f.DecMapInt16Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool, + d *Decoder) (_ map[int16]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int16) + if v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d) +} +func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d *Decoder) { + if v, changed := f.DecMapInt16Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool, + d *Decoder) (_ map[int16]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int32) + if v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d) +} +func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) { + if v, changed := f.DecMapInt16Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool, + d *Decoder) (_ map[int16]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep 
{ + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int64) + if v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d) +} +func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) { + if v, changed := f.DecMapInt16Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool, + d *Decoder) (_ map[int16]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float32) + if v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d) +} +func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) { + if v, changed := f.DecMapInt16Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool, + d *Decoder) (_ map[int16]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float64) + if v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d) +} +func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) { + if v, changed := f.DecMapInt16Float64V(*vp, true, d); changed 
{ + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool, + d *Decoder) (_ map[int16]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]bool) + if v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d) +} +func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) { + if v, changed := f.DecMapInt16BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool, + d *Decoder) (_ map[int16]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]interface{}) + if v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d) +} +func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) { + if v, changed := f.DecMapInt32IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool, + d *Decoder) (_ map[int32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + 
delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]string) + if v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d) +} +func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) { + if v, changed := f.DecMapInt32StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool, + d *Decoder) (_ map[int32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint) + if v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d) +} +func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) { + if v, changed := f.DecMapInt32UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool, + d *Decoder) (_ map[int32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint8) + if v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d) +} +func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) { + if v, changed := f.DecMapInt32Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool, + d *Decoder) (_ map[int32]uint8, changed bool) { + dd, esep := 
d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint16) + if v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d) +} +func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) { + if v, changed := f.DecMapInt32Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool, + d *Decoder) (_ map[int32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint32) + if v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d) +} +func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) { + if v, changed := f.DecMapInt32Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool, + d *Decoder) (_ map[int32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint64) + if v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d) +} +func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) { + if v, changed := f.DecMapInt32Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool, + d *Decoder) (_ map[int32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uintptr) + if v, changed := fastpathTV.DecMapInt32UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d) +} +func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) { + if v, changed := f.DecMapInt32UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool, + d *Decoder) (_ map[int32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int) + if v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d) +} +func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) { + if v, changed := f.DecMapInt32IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool, + d *Decoder) (_ map[int32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int, xlen) + changed = true + } 
+ if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int8) + if v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d) +} +func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, d *Decoder) { + if v, changed := f.DecMapInt32Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool, + d *Decoder) (_ map[int32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int16) + if v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d) +} +func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) { + if v, changed := f.DecMapInt32Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool, + d *Decoder) (_ map[int32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int32) + if v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d); changed { + *vp = v + } + return + } + 
fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d) +} +func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) { + if v, changed := f.DecMapInt32Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool, + d *Decoder) (_ map[int32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int64) + if v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d) +} +func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) { + if v, changed := f.DecMapInt32Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool, + d *Decoder) (_ map[int32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float32) + if v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d) +} +func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) { + if v, changed := f.DecMapInt32Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool, + d *Decoder) (_ map[int32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + 
dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float64) + if v, changed := fastpathTV.DecMapInt32Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d) +} +func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) { + if v, changed := f.DecMapInt32Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool, + d *Decoder) (_ map[int32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]bool) + if v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d) +} +func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) { + if v, changed := f.DecMapInt32BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool, + d *Decoder) (_ map[int32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]interface{}) + if v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d) +} +func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) { + if v, changed := f.DecMapInt64IntfV(*vp, true, d); changed { + *vp = v + } +} 
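+// NOTE (illustrative comment, not part of the generated output): every fast-path
+// map decoder in this file follows the same template, specialized per key/value
+// type. A simplified hand-written equivalent for one combination, assuming the
+// same Decoder/decDriver helpers used above (ReadMapStart, DecodeInt, decode,
+// ReadMapEnd, decInferLen) and omitting the element-separator and nil-value
+// handling for brevity, would look roughly like this:
+//
+//	func decMapInt64IntfSketch(v map[int64]interface{}, canChange bool,
+//		d *Decoder) (map[int64]interface{}, bool) {
+//		dd := d.d
+//		changed := false
+//		n := dd.ReadMapStart()
+//		if canChange && v == nil {
+//			// size hint: sizeof(int64) + a nominal 16 bytes per interface value
+//			v = make(map[int64]interface{}, decInferLen(n, d.h.MaxInitLen, 24))
+//			changed = true
+//		}
+//		hasLen := n > 0
+//		for j := 0; (hasLen && j < n) || !(hasLen || dd.CheckBreak()); j++ {
+//			k := dd.DecodeInt(64) // key decoded at its fixed bit size
+//			var e interface{}
+//			d.decode(&e) // value decoded generically for interface{} elements
+//			if v != nil {
+//				v[k] = e
+//			}
+//		}
+//		dd.ReadMapEnd()
+//		return v, changed
+//	}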
+func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool, + d *Decoder) (_ map[int64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]string) + if v, changed := fastpathTV.DecMapInt64StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d) +} +func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) { + if v, changed := f.DecMapInt64StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool, + d *Decoder) (_ map[int64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint) + if v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d) +} +func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) { + if v, changed := f.DecMapInt64UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool, + d *Decoder) (_ map[int64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, 
mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint8) + if v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d) +} +func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) { + if v, changed := f.DecMapInt64Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool, + d *Decoder) (_ map[int64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint16) + if v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d) +} +func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) { + if v, changed := f.DecMapInt64Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool, + d *Decoder) (_ map[int64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint32) + if v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d) +} +func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) { + if v, changed := f.DecMapInt64Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool, + d *Decoder) (_ map[int64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + 
containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint64) + if v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d) +} +func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) { + if v, changed := f.DecMapInt64Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool, + d *Decoder) (_ map[int64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uintptr) + if v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d) +} +func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) { + if v, changed := f.DecMapInt64UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool, + d *Decoder) (_ map[int64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv 
reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int) + if v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d) +} +func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) { + if v, changed := f.DecMapInt64IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool, + d *Decoder) (_ map[int64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int8) + if v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d) +} +func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) { + if v, changed := f.DecMapInt64Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool, + d *Decoder) (_ map[int64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int16) + if v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d) +} +func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) { + if v, changed := f.DecMapInt64Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool, + d *Decoder) (_ map[int64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv int16 + hasLen := 
containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int32) + if v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d) +} +func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) { + if v, changed := f.DecMapInt64Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool, + d *Decoder) (_ map[int64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int64) + if v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d) +} +func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) { + if v, changed := f.DecMapInt64Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool, + d *Decoder) (_ map[int64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float32) + if v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d) +} +func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) { + if v, 
changed := f.DecMapInt64Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool, + d *Decoder) (_ map[int64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float64) + if v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d) +} +func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, d *Decoder) { + if v, changed := f.DecMapInt64Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool, + d *Decoder) (_ map[int64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]bool) + if v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d) +} +func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) { + if v, changed := f.DecMapInt64BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool, + d *Decoder) (_ map[int64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } 
else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]interface{}) + if v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d) +} +func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) { + if v, changed := f.DecMapBoolIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool, + d *Decoder) (_ map[bool]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk bool + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]string) + if v, changed := fastpathTV.DecMapBoolStringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d) +} +func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) { + if v, changed := f.DecMapBoolStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool, + d *Decoder) (_ map[bool]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint) + if v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d) +} +func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) { + if v, changed := f.DecMapBoolUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool, + d *Decoder) (_ map[bool]uint, changed bool) { + dd, 
esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint8) + if v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d) +} +func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) { + if v, changed := f.DecMapBoolUint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool, + d *Decoder) (_ map[bool]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint16) + if v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d) +} +func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) { + if v, changed := f.DecMapBoolUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool, + d *Decoder) (_ map[bool]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + if 
rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint32) + if v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d) +} +func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) { + if v, changed := f.DecMapBoolUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool, + d *Decoder) (_ map[bool]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint64) + if v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d) +} +func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) { + if v, changed := f.DecMapBoolUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool, + d *Decoder) (_ map[bool]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uintptr) + if v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d) +} +func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) { + if v, changed := f.DecMapBoolUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool, + d *Decoder) (_ map[bool]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uintptr 
+ hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int) + if v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d) +} +func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) { + if v, changed := f.DecMapBoolIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool, + d *Decoder) (_ map[bool]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int8) + if v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d) +} +func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) { + if v, changed := f.DecMapBoolInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool, + d *Decoder) (_ map[bool]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int16) + if v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d) +} +func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) { + if v, changed := f.DecMapBoolInt16V(*vp, true, d); changed { 
+ *vp = v + } +} +func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool, + d *Decoder) (_ map[bool]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int32) + if v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d) +} +func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) { + if v, changed := f.DecMapBoolInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool, + d *Decoder) (_ map[bool]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int64) + if v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d) +} +func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) { + if v, changed := f.DecMapBoolInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool, + d *Decoder) (_ map[bool]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + 
return v, changed +} + +func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float32) + if v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d) +} +func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) { + if v, changed := f.DecMapBoolFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool, + d *Decoder) (_ map[bool]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float64) + if v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d) +} +func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) { + if v, changed := f.DecMapBoolFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool, + d *Decoder) (_ map[bool]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]bool) + if v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d) +} +func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) { + if v, changed := f.DecMapBoolBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool, + d *Decoder) (_ map[bool]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]bool, xlen) + changed = 
true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.not.go new file mode 100644 index 00000000..9573d64a --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.not.go @@ -0,0 +1,35 @@ +// +build notfastpath + +package codec + +import "reflect" + +const fastpathEnabled = false + +// The generated fast-path code is very large, and adds a few seconds to the build time. +// This causes test execution, execution of small tools which use codec, etc +// to take a long time. +// +// To mitigate, we now support the notfastpath tag. +// This tag disables fastpath during build, allowing for faster build, test execution, +// short-program runs, etc. + +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } +func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { return false } + +type fastpathT struct{} +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} +type fastpathA [0]fastpathE + +func (x fastpathA) index(rtid uintptr) int { return -1 } + +var fastpathAV fastpathA +var fastpathTV fastpathT diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen-helper.generated.go new file mode 100644 index 00000000..9293463d --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen-helper.generated.go @@ -0,0 +1,250 @@ +/* // +build ignore */ + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl +// ************************************************************ + +package codec + +import ( + "encoding" + "reflect" +) + +// GenVersion is the current version of codecgen. +const GenVersion = 8 + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continuously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. 
+ +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { + return genHelperEncoder{e: e}, e.e +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { + return genHelperDecoder{d: d}, d.d +} + +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func BasicHandleDoNotUse(h Handle) *BasicHandle { + return h.getBasicHandle() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.cf.be // f.e.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshal(bs, fnerr, false, c_UTF8) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshal(bs, fnerr, true, c_UTF8) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshal(bs, fnerr, false, c_RAW) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { + f.e.rawBytes(iv) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) TimeRtidIfBinc() uintptr { + if _, ok := f.e.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.cf.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v) + if rt.Kind() == reflect.Ptr { + rt = rt.Elem() + } + rtid := rt2id(rt) + if xfFn := f.e.h.getExt(rtid); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} + +// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { + f.d.swallow() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false, false) + // f.d.decodeValueFallback(rv) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { + return f.d.rawBytes() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) TimeRtidIfBinc() uintptr { + if _, ok := f.d.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v).Elem() + rtid := rt2id(rt) + if xfFn := f.d.h.getExt(rtid); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) StringView(v []byte) string { + return stringView(v) +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.generated.go new file mode 100644 index 00000000..b50a6024 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.generated.go @@ -0,0 +1,132 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl + +const genDecMapTmpl = ` +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? 
+r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} +` + +const genDecListTmpl = ` +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{end}} {{if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int; _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if {{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + {{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = 8 + } + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + // {{var "dn"}} = r.TryDecodeAsNil() + {{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + {{else}} + // if indefinite, etc, then expand the slice if necessary + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} + +` + diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.go new file mode 100644 index 00000000..043f10d8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.go @@ -0,0 +1,2014 @@ +// +build codecgen.exec + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "go/format" + "io" + "io/ioutil" + "math/rand" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "text/template" + "time" + "unicode" + "unicode/utf8" +) + +// --------------------------------------------------- +// codecgen supports the full cycle of reflection-based codec: +// - RawExt +// - Raw +// - Builtins +// - Extensions +// - (Binary|Text|JSON)(Unm|M)arshal +// - generic by-kind +// +// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. +// In those areas, we try to only do reflection or interface-conversion when NECESSARY: +// - Extensions, only if Extensions are configured. +// +// However, codecgen doesn't support the following: +// - Canonical option. (codecgen IGNORES it currently) +// This is just because it has not been implemented. +// +// During encode/decode, Selfer takes precedence. +// A type implementing Selfer will know how to encode/decode itself statically. +// +// The following field types are supported: +// array: [n]T +// slice: []T +// map: map[K]V +// primitive: [u]int[n], float(32|64), bool, string +// struct +// +// --------------------------------------------------- +// Note that a Selfer cannot call (e|d).(En|De)code on itself, +// as this will cause a circular reference, as (En|De)code will call Selfer methods. +// Any type that implements Selfer must implement completely and not fallback to (En|De)code. +// +// In addition, code in this file manages the generation of fast-path implementations of +// encode/decode of slices/maps of primitive keys/values. +// +// Users MUST re-generate their implementations whenever the code shape changes. +// The generated code will panic if it was generated with a version older than the supporting library. +// --------------------------------------------------- +// +// codec framework is very feature rich. +// When encoding or decoding into an interface, it depends on the runtime type of the interface. +// The type of the interface may be a named type, an extension, etc. +// Consequently, we fallback to runtime codec for encoding/decoding interfaces. +// In addition, we fallback for any value which cannot be guaranteed at runtime. +// This allows us support ANY value, including any named types, specifically those which +// do not implement our interfaces (e.g. Selfer). +// +// This explains some slowness compared to other code generation codecs (e.g. msgp). +// This reduction in speed is only seen when your refers to interfaces, +// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } +// +// codecgen will panic if the file was generated with an old version of the library in use. +// +// Note: +// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. +// This way, there isn't a function call overhead just to see that we should not enter a block of code. +// +// Note: +// codecgen-generated code depends on the variables defined by fast-path.generated.go. +// consequently, you cannot run with tags "codecgen notfastpath". + +// GenVersion is the current version of codecgen. +// +// NOTE: Increment this value each time codecgen changes fundamentally. 
+// Fundamental changes are: +// - helper methods change (signature change, new ones added, some removed, etc) +// - codecgen command line changes +// +// v1: Initial Version +// v2: +// v3: Changes for Kubernetes: +// changes in signature of some unpublished helper methods and codecgen cmdline arguments. +// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) +// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. +// v6: removed unsafe from gen, and now uses codecgen.exec tag +const genVersion = 8 + +const ( + genCodecPkg = "codec1978" + genTempVarPfx = "yy" + genTopLevelVarName = "x" + + // ignore canBeNil parameter, and always set to true. + // This is because nil can appear anywhere, so we should always check. + genAnythingCanBeNil = true + + // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; + // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals + // are not executed a lot. + // + // From testing, it didn't make much difference in runtime, so keep as true (one function only) + genUseOneFunctionForDecStructMap = true +) + +type genStructMapStyle uint8 + +const ( + genStructMapStyleConsolidated genStructMapStyle = iota + genStructMapStyleLenPrefix + genStructMapStyleCheckBreak +) + +var ( + genAllTypesSamePkgErr = errors.New("All types must be in the same package") + genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice") + genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") + genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) +) + +// genRunner holds some state used during a Gen run. +type genRunner struct { + w io.Writer // output + c uint64 // counter used for generating varsfx + t []reflect.Type // list of types to run selfer on + + tc reflect.Type // currently running selfer on this type + te map[uintptr]bool // types for which the encoder has been created + td map[uintptr]bool // types for which the decoder has been created + cp string // codec import path + + im map[string]reflect.Type // imports to add + imn map[string]string // package names of imports to add + imc uint64 // counter for import numbers + + is map[reflect.Type]struct{} // types seen during import search + bp string // base PkgPath, for which we are generating for + + cpfx string // codec package prefix + + tm map[reflect.Type]struct{} // types for which enc/dec must be generated + ts []reflect.Type // types for which enc/dec must be generated + + xs string // top level variable/constant suffix + hn string // fn helper type name + + ti *TypeInfos + // rr *rand.Rand // random generator for file-specific types + + nx bool // no extensions +} + +// Gen will write a complete go file containing Selfer implementations for each +// type passed. All the types must be in the same package. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool, + ti *TypeInfos, typ ...reflect.Type) { + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. 
+ // Consequently, there's no need to check and trim them if they implement codec.Selfer + + if len(typ) == 0 { + return + } + x := genRunner{ + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + nx: noExtensions, + } + if x.ti == nil { + x.ti = defTypeInfos + } + if x.xs == "" { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + x.xs = strconv.FormatInt(rr.Int63n(9999), 10) + } + + // gather imports first: + x.cp = genImportPath(reflect.TypeOf(x)) + x.imn[x.cp] = genCodecPkg + for _, t := range typ { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { + panic(genAllTypesSamePkgErr) + } + x.genRefPkgs(t) + } + if buildTags != "" { + x.line("// +build " + buildTags) + x.line("") + } + x.line(` + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +`) + x.line("package " + pkgName) + x.line("") + x.line("import (") + if x.cp != x.bp { + x.cpfx = genCodecPkg + "." + x.linef("%s \"%s\"", genCodecPkg, x.cp) + } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) + for k, _ := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { + x.linef("%s \"%s\"", x.imn[k], k) + } + // add required packages + for _, k := range [...]string{"reflect", "runtime", "fmt", "errors"} { + if _, ok := x.im[k]; !ok { + x.line("\"" + k + "\"") + } + } + x.line(")") + x.line("") + + x.line("const (") + x.linef("// ----- content types ----") + x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8)) + x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW)) + x.linef("// ----- value types used ----") + x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) + x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) + x.linef("// ----- containerStateValues ----") + x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey)) + x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue)) + x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd)) + x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem)) + x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd)) + x.line(")") + x.line("var (") + x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())") + x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") + x.line(")") + x.line("") + + x.hn = "codecSelfer" + x.xs + x.line("type " + x.hn + " struct{}") + x.line("") + + x.varsfxreset() + x.line("func init() {") + x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) + x.line("_, file, _, _ := runtime.Caller(0)") + x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", `) + x.linef(`%v, %sGenVersion, file)`, genVersion, x.cpfx) + x.line("panic(err)") + x.linef("}") + x.line("if false { // reference the types, but skip this branch at build/run time") + var n int + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] + x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) + n++ + } + if n > 0 { + x.out("_") + for i := 1; i < n; i++ { + x.out(", _") + } + x.out(" = v0") + for i := 1; i < n; i++ { + x.outf(", v%v", i) + } + } + x.line("} ") // close if false + x.line("}") // close init + x.line("") + + // generate rest of type info + for _, t := range typ { + x.tc = t + x.selfer(true) + x.selfer(false) + } + + for _, t := range x.ts { + rtid := rt2id(t) + // generate enc functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(true) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.encListFallback("v", t) + case reflect.Map: + x.encMapFallback("v", t) + default: + panic(genExpectArrayOrMapErr) + } + x.line("}") + x.line("") + + // generate dec functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(false) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.decListFallback("v", rtid, t) + case reflect.Map: + x.decMapFallback("v", rtid, t) + default: + panic(genExpectArrayOrMapErr) + } + x.line("}") + x.line("") + } + + x.line("") +} + +func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { + // return varname != genTopLevelVarName && t != x.tc + // the only time we checkForSelfer is if we are not at the TOP of the generated code. 
+ return varname != genTopLevelVarName +} + +func (x *genRunner) arr2str(t reflect.Type, s string) string { + if t.Kind() == reflect.Array { + return s + } + return "" +} + +func (x *genRunner) genRequiredMethodVars(encode bool) { + x.line("var h " + x.hn) + if encode { + x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") + } else { + x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") + } + x.line("_, _, _ = h, z, r") +} + +func (x *genRunner) genRefPkgs(t reflect.Type) { + if _, ok := x.is[t]; ok { + return + } + // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + x.is[t] = struct{}{} + tpkg, tname := genImportPath(t), t.Name() + if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { + if _, ok := x.im[tpkg]; !ok { + x.im[tpkg] = t + if idx := strings.LastIndex(tpkg, "/"); idx < 0 { + x.imn[tpkg] = tpkg + } else { + x.imc++ + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) + } + } + } + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: + x.genRefPkgs(t.Elem()) + case reflect.Map: + x.genRefPkgs(t.Elem()) + x.genRefPkgs(t.Key()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { + x.genRefPkgs(t.Field(i).Type) + } + } + } +} + +func (x *genRunner) line(s string) { + x.out(s) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) varsfx() string { + x.c++ + return strconv.FormatUint(x.c, 10) +} + +func (x *genRunner) varsfxreset() { + x.c = 0 +} + +func (x *genRunner) out(s string) { + if _, err := io.WriteString(x.w, s); err != nil { + panic(err) + } +} + +func (x *genRunner) linef(s string, params ...interface{}) { + x.line(fmt.Sprintf(s, params...)) +} + +func (x *genRunner) outf(s string, params ...interface{}) { + x.out(fmt.Sprintf(s, params...)) +} + +func (x *genRunner) genTypeName(t reflect.Type) (n string) { + // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() + + // if the type has a PkgPath, which doesn't match the current package, + // then include it. + // We cannot depend on t.String() because it includes current package, + // or t.PkgPath because it includes full import path, + // + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "*" + t = t.Elem() + } + if tn := t.Name(); tn != "" { + return ptrPfx + x.genTypeNamePrim(t) + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) + case reflect.Slice: + return ptrPfx + "[]" + x.genTypeName(t.Elem()) + case reflect.Array: + return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) + case reflect.Chan: + return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) + default: + if t == intfTyp { + return ptrPfx + "interface{}" + } else { + return ptrPfx + x.genTypeNamePrim(t) + } + } +} + +func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { + if t.Name() == "" { + return t.String() + } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { + return t.Name() + } else { + return x.imn[genImportPath(t)] + "." 
+ t.Name() + // return t.String() // best way to get the package name inclusive + } +} + +func (x *genRunner) genZeroValueR(t reflect.Type) string { + // if t is a named type, w + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, + reflect.Slice, reflect.Map, reflect.Invalid: + return "nil" + case reflect.Bool: + return "false" + case reflect.String: + return `""` + case reflect.Struct, reflect.Array: + return x.genTypeName(t) + "{}" + default: // all numbers + return "0" + } +} + +func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { + return genMethodNameT(t, x.tc) +} + +func (x *genRunner) selfer(encode bool) { + t := x.tc + t0 := t + // always make decode use a pointer receiver, + // and structs always use a ptr receiver (encode|decode) + isptr := !encode || t.Kind() == reflect.Struct + x.varsfxreset() + fnSigPfx := "func (x " + if isptr { + fnSigPfx += "*" + } + fnSigPfx += x.genTypeName(t) + + x.out(fnSigPfx) + if isptr { + t = reflect.PtrTo(t) + } + if encode { + x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") + x.genRequiredMethodVars(true) + // x.enc(genTopLevelVarName, t) + x.encVar(genTopLevelVarName, t) + } else { + x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + // do not use decVar, as there is no need to check TryDecodeAsNil + // or way to elegantly handle that, and also setting it to a + // non-nil value doesn't affect the pointer passed. + // x.decVar(genTopLevelVarName, t, false) + x.dec(genTopLevelVarName, t0) + } + x.line("}") + x.line("") + + if encode || t0.Kind() != reflect.Struct { + return + } + + // write is containerMap + if genUseOneFunctionForDecStructMap { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated) + x.line("}") + x.line("") + } else { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix) + x.line("}") + x.line("") + + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak) + x.line("}") + x.line("") + } + + // write containerArray + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) + x.line("}") + x.line("") + +} + +// used for chan, array, slice, map +func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) { + if encode { + x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname) + } else { + x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname) + } + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. 
a slice of array: add the array + x.registerXtraT(t.Elem()) + if tkey != nil { + x.registerXtraT(tkey) + } +} + +// encVar will encode a variable. +// The parameter, t, is the reflect.Type of the variable itself +func (x *genRunner) encVar(varname string, t reflect.Type) { + // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) + var checkNil bool + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: + checkNil = true + } + if checkNil { + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + } + switch t.Kind() { + case reflect.Ptr: + switch t.Elem().Kind() { + case reflect.Struct, reflect.Array: + x.enc(varname, genNonPtr(t)) + default: + i := x.varsfx() + x.line(genTempVarPfx + i + " := *" + varname) + x.enc(genTempVarPfx+i, genNonPtr(t)) + } + case reflect.Struct, reflect.Array: + i := x.varsfx() + x.line(genTempVarPfx + i + " := &" + varname) + x.enc(genTempVarPfx+i, t) + default: + x.enc(varname, t) + } + + if checkNil { + x.line("}") + } + +} + +// enc will encode a variable (varname) of type t, +// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying) +func (x *genRunner) enc(varname string, t reflect.Type) { + rtid := rt2id(t) + // We call CodecEncodeSelf if one of the following are honored: + // - the type already implements Selfer, call that + // - the type has a Selfer implementation just created, use that + // - the type is in the list of the ones we will generate for, but it is not currently being generated + + mi := x.varsfx() + tptr := reflect.PtrTo(t) + tk := t.Kind() + if x.checkForSelfer(t, varname) { + if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T + if tptr.Implements(selferTyp) || t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } else { // varname is of type T + if t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } else if tptr.Implements(selferTyp) { + x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) + x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) + return + } + } + + if _, ok := x.te[rtid]; ok { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.te[rtid] = true + rtidAdded = true + } + + // check if + // - type is RawExt, Raw + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi) + x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == rawTyp { + x.linef("} else { z.EncRaw(%v)", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.EncodeRawExt(%v, e)", varname) + return + } + // HACK: Support for Builtins. + // Currently, only Binc supports builtins, and the only builtin type is time.Time. + // Have a method that returns the rtid for time.Time if Handle is Binc. + if t == timeTyp { + vrtid := genTempVarPfx + "m" + x.varsfx() + x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) + x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname) + } + // only check for extensions if the type is named, and has a packagePath. 
+ if !x.nx && genImportPath(t) != "" && t.Name() != "" { + // first check if extensions are configured, before doing the interface conversion + x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) + } + if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T + if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) + } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + } + } else { // varname is of type T + if t.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname) + } else if t.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname) + } + } + x.line("} else {") + + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(int64(" + varname + "))") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(uint64(" + varname + "))") + case reflect.Float32: + x.line("r.EncodeFloat32(float32(" + varname + "))") + case reflect.Float64: + x.line("r.EncodeFloat64(float64(" + varname + "))") + case reflect.Bool: + x.line("r.EncodeBool(bool(" + varname + "))") + case reflect.String: + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))") + case reflect.Chan: + x.xtraSM(varname, true, t) + // x.encListFallback(varname, rtid, t) + case reflect.Array: + x.xtraSM(varname, true, t) + case reflect.Slice: + // if nil, call dedicated function + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if rtid == uint8SliceTypId { + x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))") + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") + } else { + x.xtraSM(varname, true, t) + // x.encListFallback(varname, rtid, t) + } + case reflect.Map: + // if nil, call dedicated function + // if a known fastpath map, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." 
+ g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") + } else { + x.xtraSM(varname, true, t) + // x.encMapFallback(varname, rtid, t) + } + case reflect.Struct: + if !inlist { + delete(x.te, rtid) + x.line("z.EncFallback(" + varname + ")") + break + } + x.encStruct(varname, rtid, t) + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.EncFallback(" + varname + ")") + } +} + +func (x *genRunner) encZero(t reflect.Type) { + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(0)") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(0)") + case reflect.Float32: + x.line("r.EncodeFloat32(0)") + case reflect.Float64: + x.line("r.EncodeFloat64(0)") + case reflect.Bool: + x.line("r.EncodeBool(false)") + case reflect.String: + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`) + default: + x.line("r.EncodeNil()") + } +} + +func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { + // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) + // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it + + // if t === type currently running selfer on, do for all + ti := x.ti.get(rtid, t) + i := x.varsfx() + sepVarname := genTempVarPfx + "sep" + i + numfieldsvar := genTempVarPfx + "q" + i + ti2arrayvar := genTempVarPfx + "r" + i + struct2arrvar := genTempVarPfx + "2arr" + i + + x.line(sepVarname + " := !z.EncBinary()") + x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + // due to omitEmpty, we need to calculate the + // number of non-empty things we write out first. + // This is required as we need to pre-determine the size of the container, + // to support length-prefixing. + if ti.anyOmitEmpty { + x.linef("var %s [%v]bool", numfieldsvar, len(tisfi)) + x.linef("_ = %s", numfieldsvar) + } + x.linef("_, _ = %s, %s", sepVarname, struct2arrvar) + x.linef("const %s bool = %v", ti2arrayvar, ti.toArray) + var nn int + if ti.anyOmitEmpty { + for j, si := range tisfi { + if !si.omitEmpty { + nn++ + continue + } + var t2 reflect.StructField + var omitline string + { + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + omitline += varname3 + " != nil && " + } + } + } + // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. + // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) + switch t2.Type.Kind() { + case reflect.Struct: + omitline += " true" + case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: + omitline += "len(" + varname + "." + t2.Name + ") != 0" + default: + omitline += varname + "." 
+ t2.Name + " != " + x.genZeroValueR(t2.Type) + } + x.linef("%s[%v] = %s", numfieldsvar, j, omitline) + } + } + // x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.linef("r.WriteArrayStart(%d)", len(tisfi)) + x.linef("} else {") // if not ti.toArray + if ti.anyOmitEmpty { + x.linef("var %snn%s = %v", genTempVarPfx, i, nn) + x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) + x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i) + x.linef("%snn%s = %v", genTempVarPfx, i, 0) + } else { + x.linef("r.WriteMapStart(%d)", len(tisfi)) + } + x.line("}") // close if not StructToArray + + for j, si := range tisfi { + i := x.varsfx() + isNilVarName := genTempVarPfx + "n" + i + var labelUsed bool + var t2 reflect.StructField + { + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix) + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + if !labelUsed { + x.line("var " + isNilVarName + " bool") + } + x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") + x.line("goto LABEL" + i) + x.line("}") + labelUsed = true + // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") + } + } + // t2 = t.FieldByIndex(si.is) + } + if labelUsed { + x.line("LABEL" + i + ":") + } + // if the type of the field is a Selfer, or one of the ones + + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray + if labelUsed { + x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName) + // x.linef("if %s { z.EncSendContainerState(codecSelfer_containerArrayElem%s); r.EncodeNil() } else { ", isNilVarName, x.xs) + } + x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + if si.omitEmpty { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.encVar(varname+"."+t2.Name, t2.Type) + if si.omitEmpty { + x.linef("} else {") + x.encZero(t2.Type) + x.linef("}") + } + if labelUsed { + x.line("}") + } + + x.linef("} else {") // if not ti.toArray + + if si.omitEmpty { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.line("r.WriteMapElemKey()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))") + x.line("r.WriteMapElemValue()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + x.encVar(varname+"."+t2.Name, t2.Type) + x.line("}") + } else { + x.encVar(varname+"."+t2.Name, t2.Type) + } + if si.omitEmpty { + x.line("}") + } + x.linef("} ") // end if/else ti.toArray + } + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.line("r.WriteArrayEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("} else {") + x.line("r.WriteMapEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + x.line("}") + +} + +func (x *genRunner) encListFallback(varname string, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname) + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, 
([%v]byte(%s))[:])", x.xs, t.Len(), varname) + return + } + i := x.varsfx() + g := genTempVarPfx + x.line("r.WriteArrayStart(len(" + varname + "))") + if t.Kind() == reflect.Chan { + x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i) + x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.linef("%sv%s := <-%s", g, i, varname) + } else { + // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) + x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + } + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteArrayEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) +} + +func (x *genRunner) encMapFallback(varname string, t reflect.Type) { + // TODO: expand this to handle canonical. + i := x.varsfx() + x.line("r.WriteMapStart(len(" + varname + "))") + x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {") + x.line("r.WriteMapElemKey()") // f("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.encVar(genTempVarPfx+"k"+i, t.Key()) + x.line("r.WriteMapElemValue()") // f("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteMapEnd()") // f("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) +} + +func (x *genRunner) decVar(varname, decodedNilVarname string, t reflect.Type, canBeNil bool) { + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + i := x.varsfx() + if !canBeNil { + canBeNil = genAnythingCanBeNil || !genIsImmutable(t) + } + if canBeNil { + x.line("if r.TryDecodeAsNil() {") + if decodedNilVarname != "" { + x.line(decodedNilVarname + " = true") + } else if t.Kind() == reflect.Ptr { + x.line("if " + varname + " != nil { ") + + // if varname is a field of a struct (has a dot in it), + // then just set it to nil + if strings.IndexByte(varname, '.') != -1 { + x.line(varname + " = nil") + } else { + x.line("*" + varname + " = " + x.genZeroValueR(t.Elem())) + } + x.line("}") + } else { + x.line(varname + " = " + x.genZeroValueR(t)) + } + x.line("} else {") + } else { + x.line("// cannot be nil") + } + if t.Kind() != reflect.Ptr { + if x.decTryAssignPrimitive(varname, t) { + x.line(genTempVarPfx + "v" + i + " := &" + varname) + x.dec(genTempVarPfx+"v"+i, t) + } + } else { + x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) + // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). + // There's a chance of a **T in here which is nil. 
+ var ptrPfx string + for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { + ptrPfx += "*" + x.linef("if %s%s == nil { %s%s = new(%s)}", + ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) + } + // if varname has [ in it, then create temp variable for this ptr thingie + if strings.Index(varname, "[") >= 0 { + varname2 := genTempVarPfx + "w" + i + x.line(varname2 + " := " + varname) + varname = varname2 + } + + if ptrPfx == "" { + x.dec(varname, t) + } else { + x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname) + x.dec(genTempVarPfx+"z"+i, t) + } + + } + + if canBeNil { + x.line("} ") + } +} + +// dec will decode a variable (varname) of type ptrTo(t). +// t is always a basetype (i.e. not of kind reflect.Ptr). +func (x *genRunner) dec(varname string, t reflect.Type) { + // assumptions: + // - the varname is to a pointer already. No need to take address of it + // - t is always a baseType T (not a *T, etc). + rtid := rt2id(t) + tptr := reflect.PtrTo(t) + if x.checkForSelfer(t, varname) { + if t.Implements(selferTyp) || tptr.Implements(selferTyp) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + if _, ok := x.td[rtid]; ok { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.td[rtid] = true + rtidAdded = true + } + + // check if + // - type is Raw, RawExt + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + mi := x.varsfx() + x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) + x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == rawTyp { + x.linef("} else { *%v = z.DecRaw()", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) + return + } + + // HACK: Support for Builtins. + // Currently, only Binc supports builtins, and the only builtin type is time.Time. + // Have a method that returns the rtid for time.Time if Handle is Binc. + if t == timeTyp { + vrtid := genTempVarPfx + "m" + x.varsfx() + x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) + x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) + } + // only check for extensions if the type is named, and has a packagePath. 
+ if !x.nx && genImportPath(t) != "" && t.Name() != "" { + // first check if extensions are configured, before doing the interface conversion + x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) + } + + if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { + x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname) + } else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { + x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname) + } + + x.line("} else {") + + // Since these are pointers, we cannot share, and have to use them one by one + switch t.Kind() { + case reflect.Int: + x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))") + // x.line("z.DecInt((*int)(" + varname + "))") + case reflect.Int8: + x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))") + // x.line("z.DecInt8((*int8)(" + varname + "))") + case reflect.Int16: + x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))") + // x.line("z.DecInt16((*int16)(" + varname + "))") + case reflect.Int32: + x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))") + // x.line("z.DecInt32((*int32)(" + varname + "))") + case reflect.Int64: + x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))") + // x.line("z.DecInt64((*int64)(" + varname + "))") + + case reflect.Uint: + x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))") + // x.line("z.DecUint((*uint)(" + varname + "))") + case reflect.Uint8: + x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))") + // x.line("z.DecUint8((*uint8)(" + varname + "))") + case reflect.Uint16: + x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))") + //x.line("z.DecUint16((*uint16)(" + varname + "))") + case reflect.Uint32: + x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))") + //x.line("z.DecUint32((*uint32)(" + varname + "))") + case reflect.Uint64: + x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))") + //x.line("z.DecUint64((*uint64)(" + varname + "))") + case reflect.Uintptr: + x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))") + + case reflect.Float32: + x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))") + //x.line("z.DecFloat32((*float32)(" + varname + "))") + case reflect.Float64: + x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))") + // x.line("z.DecFloat64((*float64)(" + varname + "))") + + case reflect.Bool: + x.line("*((*bool)(" + varname + ")) = r.DecodeBool()") + // x.line("z.DecBool((*bool)(" + varname + "))") + case reflect.String: + x.line("*((*string)(" + varname + ")) = r.DecodeString()") + // x.line("z.DecString((*string)(" + varname + "))") + case reflect.Array, reflect.Chan: + x.xtraSM(varname, false, t) + // x.decListFallback(varname, rtid, true, t) + case reflect.Slice: + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. 
+ if rtid == uint8SliceTypId { + x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false)") + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", d)") + } else { + x.xtraSM(varname, false, t) + // x.decListFallback(varname, rtid, false, t) + } + case reflect.Map: + // if a known fastpath map, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", d)") + } else { + x.xtraSM(varname, false, t) + // x.decMapFallback(varname, rtid, t) + } + case reflect.Struct: + if inlist { + x.decStruct(varname, rtid, t) + } else { + // delete(x.td, rtid) + x.line("z.DecFallback(" + varname + ", false)") + } + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.DecFallback(" + varname + ", true)") + } +} + +func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) { + // This should only be used for exact primitives (ie un-named types). + // Named types may be implementations of Selfer, Unmarshaler, etc. + // They should be handled by dec(...) + + if t.Name() != "" { + tryAsPtr = true + return + } + + switch t.Kind() { + case reflect.Int: + x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs) + case reflect.Int8: + x.linef("%s = r.DecodeInt(8)", varname) + case reflect.Int16: + x.linef("%s = r.DecodeInt(16)", varname) + case reflect.Int32: + x.linef("%s = r.DecodeInt(32)", varname) + case reflect.Int64: + x.linef("%s = r.DecodeInt(64)", varname) + + case reflect.Uint: + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) + case reflect.Uint8: + x.linef("%s = r.DecodeUint(8)", varname) + case reflect.Uint16: + x.linef("%s = r.DecodeUint(16)", varname) + case reflect.Uint32: + x.linef("%s = r.DecodeUint(32)", varname) + case reflect.Uint64: + x.linef("%s = r.DecodeUint(64)", varname) + case reflect.Uintptr: + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) + + case reflect.Float32: + x.linef("%s = r.DecodeFloat(true)", varname) + case reflect.Float64: + x.linef("%s = r.DecodeFloat(false)", varname) + + case reflect.Bool: + x.linef("%s = r.DecodeBool()", varname) + case reflect.String: + x.linef("%s = r.DecodeString()", varname) + default: + tryAsPtr = true + } + return +} + +func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], true)", t.Len(), varname) + return + } + type tstruc struct { + TempVar string + Rand string + Varname string + CTyp string + Typ string + Immutable bool + Size int + } + telem := t.Elem() + ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} + + funcs := make(template.FuncMap) + + funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, "", telem, false) + return "" + } + // funcs["decLine"] = func(pfx string) string { + // x.decVar(ts.TempVar+pfx+ts.Rand, "", reflect.PtrTo(telem), false) + // return "" + // } + funcs["var"] = func(s string) string { + return ts.TempVar + s + 
ts.Rand + } + funcs["zero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["isArray"] = func() bool { + return t.Kind() == reflect.Array + } + funcs["isSlice"] = func() bool { + return t.Kind() == reflect.Slice + } + funcs["isChan"] = func() bool { + return t.Kind() == reflect.Chan + } + tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { + type tstruc struct { + TempVar string + Sfx string + Rand string + Varname string + KTyp string + Typ string + Size int + } + telem := t.Elem() + tkey := t.Key() + ts := tstruc{ + genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), + x.genTypeName(telem), int(telem.Size() + tkey.Size()), + } + + funcs := make(template.FuncMap) + funcs["decElemZero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["decElemKindImmutable"] = func() bool { + return genIsImmutable(telem) + } + funcs["decElemKindPtr"] = func() bool { + return telem.Kind() == reflect.Ptr + } + funcs["decElemKindIntf"] = func() bool { + return telem.Kind() == reflect.Interface + } + funcs["decLineVarK"] = func(varname string) string { + x.decVar(varname, "", tkey, false) + return "" + } + funcs["decLineVar"] = func(varname, decodedNilVarname string) string { + x.decVar(varname, decodedNilVarname, telem, false) + return "" + } + // funcs["decLineK"] = func(pfx string) string { + // x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false) + // return "" + // } + // funcs["decLine"] = func(pfx string) string { + // x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) + // return "" + // } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + + tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { + ti := x.ti.get(rtid, t) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + x.line("switch (" + kName + ") {") + for _, si := range tisfi { + x.line("case \"" + si.encName + "\":") + var t2 reflect.StructField + { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." 
+ t2.Name + if t2typ.Kind() == reflect.Ptr { + x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + } + } + x.decVar(varname+"."+t2.Name, "", t2.Type, false) + } + x.line("default:") + // pass the slice here, so that the string will not escape, and maybe save allocation + x.line("z.DecStructFieldNotFound(-1, " + kName + ")") + x.line("} // end switch " + kName) +} + +func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { + tpfx := genTempVarPfx + i := x.varsfx() + kName := tpfx + "s" + i + + // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into") + // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into") + // use the scratch buffer to avoid allocation (most field names are < 32). + + x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into") + + x.line("_ = " + kName + "Slc") + switch style { + case genStructMapStyleLenPrefix: + x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) + case genStructMapStyleCheckBreak: + x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) + default: // 0, otherwise. + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) + x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) + x.line("} else { if r.CheckBreak() { break }; }") + } + x.line("r.ReadMapElemKey()") // f("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line(kName + "Slc = r.DecodeStringAsBytes()") + // let string be scoped to this loop alone, so it doesn't escape. + x.line(kName + " := string(" + kName + "Slc)") + x.line("r.ReadMapElemValue()") // f("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.decStructMapSwitch(kName, varname, rtid, t) + + x.line("} // end for " + tpfx + "j" + i) + x.line("r.ReadMapEnd()") // f("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) +} + +func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { + tpfx := genTempVarPfx + i := x.varsfx() + ti := x.ti.get(rtid, t) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + x.linef("var %sj%s int", tpfx, i) + x.linef("var %sb%s bool", tpfx, i) // break + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + for _, si := range tisfi { + var t2 reflect.StructField + { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." 
+ t2.Name + if t2typ.Kind() == reflect.Ptr { + x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + } + } + + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString) + // x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", tpfx, i, x.xs, breakString) + x.line("r.ReadArrayElem()") // f("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.decVar(varname+"."+t2.Name, "", t2.Type, true) + } + // read remaining values and throw away. + x.line("for {") + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { break }", tpfx, i) + x.line("r.ReadArrayElem()") // f("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) + x.line("}") + x.line("r.ReadArrayEnd()") // f("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) +} + +func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { + // if container is map + i := x.varsfx() + x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) + x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadMapEnd()") // f("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + if genUseOneFunctionForDecStructMap { + x.line("} else { ") + x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i) + } else { + x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") + x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") + x.line("} else {") + x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") + } + x.line("}") + + // else if container is array + x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadArrayEnd()") // f("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("} else { ") + x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i) + x.line("}") + // else panic + x.line("} else { ") + x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")") + x.line("} ") +} + +// -------- + +type genV struct { + // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice + MapKey string + Elem string + Primitive string + Size int +} + +func (x *genRunner) newGenV(t reflect.Type) (v genV) { + switch t.Kind() { + case reflect.Slice, reflect.Array: + te := t.Elem() + v.Elem = x.genTypeName(te) + v.Size = int(te.Size()) + case reflect.Map: + te, tk := t.Elem(), t.Key() + v.Elem = x.genTypeName(te) + v.MapKey = x.genTypeName(tk) + v.Size = int(te.Size() + tk.Size()) + default: + panic("unexpected type for newGenV. Requires map or slice type") + } + return +} + +func (x *genV) MethodNamePfx(prefix string, prim bool) string { + var name []byte + if prefix != "" { + name = append(name, prefix...) + } + if prim { + name = append(name, genTitleCaseName(x.Primitive)...) + } else { + if x.MapKey == "" { + name = append(name, "Slice"...) + } else { + name = append(name, "Map"...) 
+ name = append(name, genTitleCaseName(x.MapKey)...) + } + name = append(name, genTitleCaseName(x.Elem)...) + } + return string(name) + +} + +// genImportPath returns import path of a non-predeclared named type, or an empty string otherwise. +// +// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, +// where PkgPath returns the full path, including the vendoring prefix that should have been stripped. +// We strip it here. +func genImportPath(t reflect.Type) (s string) { + s = t.PkgPath() + if genCheckVendor { + // HACK: always handle vendoring. It should be typically on in go 1.6, 1.7 + s = stripVendor(s) + } + return +} + +// A go identifier is (letter|_)[letter|number|_]* +func genGoIdentifier(s string, checkFirstChar bool) string { + b := make([]byte, 0, len(s)) + t := make([]byte, 4) + var n int + for i, r := range s { + if checkFirstChar && i == 0 && !unicode.IsLetter(r) { + b = append(b, '_') + } + // r must be unicode_letter, unicode_digit or _ + if unicode.IsLetter(r) || unicode.IsDigit(r) { + n = utf8.EncodeRune(t, r) + b = append(b, t[:n]...) + } else { + b = append(b, '_') + } + } + return string(b) +} + +func genNonPtr(t reflect.Type) reflect.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func genTitleCaseName(s string) string { + switch s { + case "interface{}", "interface {}": + return "Intf" + default: + return strings.ToUpper(s[0:1]) + s[1:] + } +} + +func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "Ptrto" + t = t.Elem() + } + tstr := t.String() + if tn := t.Name(); tn != "" { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + return ptrPfx + tn + } else { + if genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) + case reflect.Slice: + return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) + case reflect.Array: + return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) + case reflect.Chan: + var cx string + switch t.ChanDir() { + case reflect.SendDir: + cx = "ChanSend" + case reflect.RecvDir: + cx = "ChanRecv" + default: + cx = "Chan" + } + return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) + default: + if t == intfTyp { + return ptrPfx + "Interface" + } else { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + if t.Name() != "" { + return ptrPfx + t.Name() + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } else { + // best way to get the package name inclusive + // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) + if t.Name() != "" && genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + } +} + +// genCustomTypeName base64encodes the t.String() value in such a way +// that it can be used within a function name. 
+func genCustomTypeName(tstr string) string { + len2 := genBase64enc.EncodedLen(len(tstr)) + bufx := make([]byte, len2) + genBase64enc.Encode(bufx, []byte(tstr)) + for i := len2 - 1; i >= 0; i-- { + if bufx[i] == '=' { + len2-- + } else { + break + } + } + return string(bufx[:len2]) +} + +func genIsImmutable(t reflect.Type) (v bool) { + return isImmutableKind(t.Kind()) +} + +type genInternal struct { + Version int + Values []genV +} + +func (x genInternal) FastpathLen() (l int) { + for _, v := range x.Values { + if v.Primitive == "" { + l++ + } + } + return +} + +func genInternalZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return "nil" + case "bool": + return "false" + case "string": + return `""` + default: + return "0" + } +} + +func genInternalEncCommandAsString(s string, vname string) string { + switch s { + case "uint", "uint8", "uint16", "uint32", "uint64": + return "ee.EncodeUint(uint64(" + vname + "))" + case "int", "int8", "int16", "int32", "int64": + return "ee.EncodeInt(int64(" + vname + "))" + case "string": + return "ee.EncodeString(c_UTF8, " + vname + ")" + case "float32": + return "ee.EncodeFloat32(" + vname + ")" + case "float64": + return "ee.EncodeFloat64(" + vname + ")" + case "bool": + return "ee.EncodeBool(" + vname + ")" + case "symbol": + return "ee.EncodeSymbol(" + vname + ")" + default: + return "e.encode(" + vname + ")" + } +} + +func genInternalDecCommandAsString(s string) string { + switch s { + case "uint": + return "uint(dd.DecodeUint(uintBitsize))" + case "uint8": + return "uint8(dd.DecodeUint(8))" + case "uint16": + return "uint16(dd.DecodeUint(16))" + case "uint32": + return "uint32(dd.DecodeUint(32))" + case "uint64": + return "dd.DecodeUint(64)" + case "uintptr": + return "uintptr(dd.DecodeUint(uintBitsize))" + case "int": + return "int(dd.DecodeInt(intBitsize))" + case "int8": + return "int8(dd.DecodeInt(8))" + case "int16": + return "int16(dd.DecodeInt(16))" + case "int32": + return "int32(dd.DecodeInt(32))" + case "int64": + return "dd.DecodeInt(64)" + + case "string": + return "dd.DecodeString()" + case "float32": + return "float32(dd.DecodeFloat(true))" + case "float64": + return "dd.DecodeFloat(false)" + case "bool": + return "dd.DecodeBool()" + default: + panic(errors.New("gen internal: unknown type for decode: " + s)) + } +} + +func genInternalSortType(s string, elem bool) string { + for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { + if strings.HasPrefix(s, v) { + if elem { + if v == "int" || v == "uint" || v == "float" { + return v + "64" + } else { + return v + } + } + return v + "Slice" + } + } + panic("sorttype: unexpected type: " + s) +} + +func stripVendor(s string) string { + // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. + // if s contains /vendor/ OR startsWith vendor/, then return everything after it. 
+ const vendorStart = "vendor/" + const vendorInline = "/vendor/" + if i := strings.LastIndex(s, vendorInline); i >= 0 { + s = s[i+len(vendorInline):] + } else if strings.HasPrefix(s, vendorStart) { + s = s[len(vendorStart):] + } + return s +} + +// var genInternalMu sync.Mutex +var genInternalV = genInternal{Version: genVersion} +var genInternalTmplFuncs template.FuncMap +var genInternalOnce sync.Once + +func genInternalInit() { + types := [...]string{ + "interface{}", + "string", + "float32", + "float64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "bool", + } + // keep as slice, so it is in specific iteration order. + // Initial order was uint64, string, interface{}, int, int64 + mapvaltypes := [...]string{ + "interface{}", + "string", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "bool", + } + wordSizeBytes := int(intBitsize) / 8 + + mapvaltypes2 := map[string]int{ + "interface{}": 2 * wordSizeBytes, + "string": 2 * wordSizeBytes, + "uint": 1 * wordSizeBytes, + "uint8": 1, + "uint16": 2, + "uint32": 4, + "uint64": 8, + "uintptr": 1 * wordSizeBytes, + "int": 1 * wordSizeBytes, + "int8": 1, + "int16": 2, + "int32": 4, + "int64": 8, + "float32": 4, + "float64": 8, + "bool": 1, + } + var gt = genInternal{Version: genVersion} + + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function + for _, s := range types { + gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) + if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. + gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + } + if _, ok := mapvaltypes2[s]; !ok { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) + } + for _, ms := range mapvaltypes { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) + } + } + + funcs := make(template.FuncMap) + // funcs["haspfx"] = strings.HasPrefix + funcs["encmd"] = genInternalEncCommandAsString + funcs["decmd"] = genInternalDecCommandAsString + funcs["zerocmd"] = genInternalZeroValue + funcs["hasprefix"] = strings.HasPrefix + funcs["sorttype"] = genInternalSortType + + genInternalV = gt + genInternalTmplFuncs = funcs +} + +// genInternalGoFile is used to generate source files from templates. +// It is run by the program author alone. +// Unfortunately, it has to be exported so that it can be called from a command line tool. +// *** DO NOT USE *** +func genInternalGoFile(r io.Reader, w io.Writer) (err error) { + genInternalOnce.Do(genInternalInit) + + gt := genInternalV + + t := template.New("").Funcs(genInternalTmplFuncs) + + tmplstr, err := ioutil.ReadAll(r) + if err != nil { + return + } + + if t, err = t.Parse(string(tmplstr)); err != nil { + return + } + + var out bytes.Buffer + err = t.Execute(&out, gt) + if err != nil { + return + } + + bout, err := format.Source(out.Bytes()) + if err != nil { + w.Write(out.Bytes()) // write out if error, so we can still see. + // w.Write(bout) // write out if error, as much as possible, so we can still see. 
+ return + } + w.Write(bout) + return +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go new file mode 100644 index 00000000..7567e2c0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = true + +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + return reflect.ArrayOf(count, elem) +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go new file mode 100644 index 00000000..ec94bd0c --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = false + +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + panic("codec: reflect.ArrayOf unsupported in this go version") +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go new file mode 100644 index 00000000..51fe40e5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go @@ -0,0 +1,15 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + if size < 0 { + return reflect.MakeMapWithSize(t, 4) + } + return reflect.MakeMapWithSize(t, size) +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go new file mode 100644 index 00000000..d4b9c2c8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + return reflect.MakeMap(t) +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go new file mode 100644 index 00000000..dcd8c3d1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go @@ -0,0 +1,17 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// +build !go1.4 + +package codec + +// This codec package will only work for go1.4 and above. +// This is for the following reasons: +// - go 1.4 was released in 2014 +// - go runtime is written fully in go +// - interface only holds pointers +// - reflect.Value is stabilized as 3 words + +func init() { + panic("codec: go 1.3 and below are not supported") +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go new file mode 100644 index 00000000..68626e1c --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5,!go1.6 + +package codec + +import "os" + +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go new file mode 100644 index 00000000..344f5967 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.6,!go1.7 + +package codec + +import "os" + +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go new file mode 100644 index 00000000..de91d294 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.7 + +package codec + +const genCheckVendor = true diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go new file mode 100644 index 00000000..9d007bfe --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +var genCheckVendor = false diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper.go new file mode 100644 index 00000000..b846df0d --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper.go @@ -0,0 +1,1944 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// Contains code shared by both encode and decode. 
+ +// Some shared ideas around encoding/decoding +// ------------------------------------------ +// +// If an interface{} is passed, we first do a type assertion to see if it is +// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. +// +// If we start with a reflect.Value, we are already in reflect.Value land and +// will try to grab the function for the underlying Type and directly call that function. +// This is more performant than calling reflect.Value.Interface(). +// +// This still helps us bypass many layers of reflection, and give best performance. +// +// Containers +// ------------ +// Containers in the stream are either associative arrays (key-value pairs) or +// regular arrays (indexed by incrementing integers). +// +// Some streams support indefinite-length containers, and use a breaking +// byte-sequence to denote that the container has come to an end. +// +// Some streams also are text-based, and use explicit separators to denote the +// end/beginning of different values. +// +// During encode, we use a high-level condition to determine how to iterate through +// the container. That decision is based on whether the container is text-based (with +// separators) or binary (without separators). If binary, we do not even call the +// encoding of separators. +// +// During decode, we use a different high-level condition to determine how to iterate +// through the containers. That decision is based on whether the stream contained +// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that +// it has to be binary, and we do not even try to read separators. +// +// Philosophy +// ------------ +// On decode, this codec will update containers appropriately: +// - If struct, update fields from stream into fields of struct. +// If field in stream not found in struct, handle appropriately (based on option). +// If a struct field has no corresponding value in the stream, leave it AS IS. +// If nil in stream, set value to nil/zero value. +// - If map, update map from stream. +// If the stream value is NIL, set the map to nil. +// - if slice, try to update up to length of array in stream. +// if container len is less than stream array length, +// and container cannot be expanded, handled (based on option). +// This means you can decode 4-element stream array into 1-element array. +// +// ------------------------------------ +// On encode, user can specify omitEmpty. This means that the value will be omitted +// if the zero value. The problem may occur during decode, where omitted values do not affect +// the value being decoded into. This means that if decoding into a struct with an +// int field with current value=5, and the field is omitted in the stream, then after +// decoding, the value will still be 5 (not 0). +// omitEmpty only works if you guarantee that you always decode into zero-values. +// +// ------------------------------------ +// We could have truncated a map to remove keys not available in the stream, +// or set values in the struct which are not in the stream to their zero values. +// We decided against it because there is no efficient way to do it. +// We may introduce it as an option later. +// However, that will require enabling it for both runtime and code generation modes. +// +// To support truncate, we need to do 2 passes over the container: +// map +// - first collect all keys (e.g. 
in k1) +// - for each key in stream, mark k1 that the key should not be removed +// - after updating map, do second pass and call delete for all keys in k1 which are not marked +// struct: +// - for each field, track the *typeInfo s1 +// - iterate through all s1, and for each one not marked, set value to zero +// - this involves checking the possible anonymous fields which are nil ptrs. +// too much work. +// +// ------------------------------------------ +// Error Handling is done within the library using panic. +// +// This way, the code doesn't have to keep checking if an error has happened, +// and we don't have to keep sending the error value along with each call +// or storing it in the En|Decoder and checking it constantly along the way. +// +// The disadvantage is that small functions which use panics cannot be inlined. +// The code accounts for that by only using panics behind an interface; +// since interface calls cannot be inlined, this is irrelevant. +// +// We considered storing the error is En|Decoder. +// - once it has its err field set, it cannot be used again. +// - panicing will be optional, controlled by const flag. +// - code should always check error first and return early. +// We eventually decided against it as it makes the code clumsier to always +// check for these error conditions. + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "math" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +const ( + scratchByteArrayLen = 32 + // initCollectionCap = 16 // 32 is defensive. 16 is preferred. + + // Support encoding.(Binary|Text)(Unm|M)arshaler. + // This constant flag will enable or disable it. + supportMarshalInterfaces = true + + // for debugging, set this to false, to catch panic traces. + // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. + recoverPanicToErr = true + + // arrayCacheLen is the length of the cache used in encoder or decoder for + // allowing zero-alloc initialization. + arrayCacheLen = 8 + + // We tried an optimization, where we detect if a type is one of the known types + // we optimized for (e.g. int, []uint64, etc). + // + // However, we notice some worse performance when using this optimization. + // So we hide it behind a flag, to turn on if needed. + useLookupRecognizedTypes = false + + // using recognized allows us to do d.decode(interface{}) instead of d.decodeValue(reflect.Value) + // when we can infer that the kind of the interface{} is one of the ones hard-coded in the + // type switch for known types or the ones defined by fast-path. + // + // However, it seems we get better performance when we don't recognize, and just let + // reflection handle it. + // + // Reasoning is as below: + // typeswitch is a binary search with a branch to a code-point. + // getdecfn is a binary search with a call to a function pointer. + // + // both are about the same. + // + // so: why prefer typeswitch? 
+ // + // is recognized does the following: + // - lookup rtid + // - check if in sorted list + // - calls decode(type switch) + // - 1 or 2 binary search to a point in code + // - branch there + // + // vs getdecfn + // - lookup rtid + // - check in sorted list for a function pointer + // - calls it to decode using reflection (optimized) + + // always set xDebug = false before releasing software + xDebug = true +) + +var ( + oneByteArr = [1]byte{0} + zeroByteSlice = oneByteArr[:0:0] +) + +var pool pooler + +func init() { + pool.init() +} + +// type findCodecFnMode uint8 + +// const ( +// findCodecFnModeMap findCodecFnMode = iota +// findCodecFnModeBinarySearch +// findCodecFnModeLinearSearch +// ) + +type charEncoding uint8 + +const ( + c_RAW charEncoding = iota + c_UTF8 + c_UTF16LE + c_UTF16BE + c_UTF32LE + c_UTF32BE +) + +// valueType is the stream type +type valueType uint8 + +const ( + valueTypeUnset valueType = iota + valueTypeNil + valueTypeInt + valueTypeUint + valueTypeFloat + valueTypeBool + valueTypeString + valueTypeSymbol + valueTypeBytes + valueTypeMap + valueTypeArray + valueTypeTimestamp + valueTypeExt + + // valueTypeInvalid = 0xff +) + +func (x valueType) String() string { + switch x { + case valueTypeNil: + return "Nil" + case valueTypeInt: + return "Int" + case valueTypeUint: + return "Uint" + case valueTypeFloat: + return "Float" + case valueTypeBool: + return "Bool" + case valueTypeString: + return "String" + case valueTypeSymbol: + return "Symbol" + case valueTypeBytes: + return "Bytes" + case valueTypeMap: + return "Map" + case valueTypeArray: + return "Array" + case valueTypeTimestamp: + return "Timestamp" + case valueTypeExt: + return "Ext" + } + return strconv.FormatInt(int64(x), 10) +} + +type seqType uint8 + +const ( + _ seqType = iota + seqTypeArray + seqTypeSlice + seqTypeChan +) + +// note that containerMapStart and containerArraySend are not sent. +// This is because the ReadXXXStart and EncodeXXXStart already does these. +type containerState uint8 + +const ( + _ containerState = iota + + containerMapStart // slot left open, since Driver method already covers it + containerMapKey + containerMapValue + containerMapEnd + containerArrayStart // slot left open, since Driver methods already cover it + containerArrayElem + containerArrayEnd +) + +// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo +type sfiIdx struct { + name string + index int +} + +// do not recurse if a containing type refers to an embedded type +// which refers back to its containing type (via a pointer). +// The second time this back-reference happens, break out, +// so as not to cause an infinite loop. +const rgetMaxRecursion = 2 + +// Anecdotally, we believe most types have <= 12 fields. +// Java's PMD rules set TooManyFields threshold to 15. 
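A side note on the omitEmpty caveat spelled out in the package comments above: an omitted field simply never reaches the destination, so whatever value is already there survives the decode. A minimal sketch of that behaviour through the package's public API (the upstream import path and a JsonHandle are used purely for illustration):

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type Config struct {
	Name  string `codec:"name"`
	Count int    `codec:"count,omitempty"` // dropped from the stream when zero
}

func main() {
	var h codec.JsonHandle

	// Count is zero here, so omitempty leaves it out of the encoded stream.
	var buf []byte
	_ = codec.NewEncoderBytes(&buf, &h).Encode(Config{Name: "a"})

	// Decoding into a non-zero destination leaves Count untouched,
	// because the stream has no "count" entry to apply.
	dst := Config{Name: "old", Count: 5}
	_ = codec.NewDecoderBytes(buf, &h).Decode(&dst)
	fmt.Println(dst.Name, dst.Count) // "a 5" -- not "a 0"
}
```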
+const typeInfoLoadArrayLen = 12 + +type typeInfoLoad struct { + fNames []string + encNames []string + etypes []uintptr + sfis []*structFieldInfo +} + +type typeInfoLoadArray struct { + fNames [typeInfoLoadArrayLen]string + encNames [typeInfoLoadArrayLen]string + etypes [typeInfoLoadArrayLen]uintptr + sfis [typeInfoLoadArrayLen]*structFieldInfo + sfiidx [typeInfoLoadArrayLen]sfiIdx +} + +// type containerStateRecv interface { +// sendContainerState(containerState) +// } + +// mirror json.Marshaler and json.Unmarshaler here, +// so we don't import the encoding/json package +type jsonMarshaler interface { + MarshalJSON() ([]byte, error) +} +type jsonUnmarshaler interface { + UnmarshalJSON([]byte) error +} + +// type byteAccepter func(byte) bool + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + + binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + + textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() + jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() + + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + + uint8SliceTypId = rt2id(uint8SliceTyp) + rawExtTypId = rt2id(rawExtTyp) + rawTypId = rt2id(rawTyp) + intfTypId = rt2id(intfTyp) + timeTypId = rt2id(timeTyp) + stringTypId = rt2id(stringTyp) + + mapStrIntfTypId = rt2id(mapStrIntfTyp) + mapIntfIntfTypId = rt2id(mapIntfIntfTyp) + intfSliceTypId = rt2id(intfSliceTyp) + // mapBySliceTypId = rt2id(mapBySliceTyp) + + intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + chkOvf checkOverflow + + noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo") +) + +var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) + +var immutableKindsSet = [32]bool{ + // reflect.Invalid: , + reflect.Bool: true, + reflect.Int: true, + reflect.Int8: true, + reflect.Int16: true, + reflect.Int32: true, + reflect.Int64: true, + reflect.Uint: true, + reflect.Uint8: true, + reflect.Uint16: true, + reflect.Uint32: true, + reflect.Uint64: true, + reflect.Uintptr: true, + reflect.Float32: true, + reflect.Float64: true, + reflect.Complex64: true, + reflect.Complex128: true, + // reflect.Array + // reflect.Chan + // reflect.Func: true, + // reflect.Interface + // reflect.Map + // reflect.Ptr + // reflect.Slice + reflect.String: true, + // reflect.Struct + // reflect.UnsafePointer +} + +var recognizedRtids []uintptr +var recognizedRtidPtrs []uintptr +var recognizedRtidOrPtrs []uintptr + +func init() { + if !useLookupRecognizedTypes { + return + } + for _, v := range [...]interface{}{ + float32(0), + float64(0), + uintptr(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), 
+ uintptr(0), + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + bool(false), + string(""), + Raw{}, + []byte(nil), + } { + rt := reflect.TypeOf(v) + recognizedRtids = append(recognizedRtids, rt2id(rt)) + recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(rt))) + } +} + +func containsU(s []uintptr, v uintptr) bool { + // return false // TODO: REMOVE + h, i, j := 0, 0, len(s) + for i < j { + h = i + (j-i)/2 + if s[h] < v { + i = h + 1 + } else { + j = h + } + } + if i < len(s) && s[i] == v { + return true + } + return false +} + +func isRecognizedRtid(rtid uintptr) bool { + return containsU(recognizedRtids, rtid) +} + +func isRecognizedRtidPtr(rtid uintptr) bool { + return containsU(recognizedRtidPtrs, rtid) +} + +func isRecognizedRtidOrPtr(rtid uintptr) bool { + return containsU(recognizedRtidOrPtrs, rtid) +} + +// Selfer defines methods by which a value can encode or decode itself. +// +// Any type which implements Selfer will be able to encode or decode itself. +// Consequently, during (en|de)code, this takes precedence over +// (text|binary)(M|Unm)arshal or extension support. +type Selfer interface { + CodecEncodeSelf(*Encoder) + CodecDecodeSelf(*Decoder) +} + +// MapBySlice represents a slice which should be encoded as a map in the stream. +// The slice contains a sequence of key-value pairs. +// This affords storing a map in a specific sequence in the stream. +// +// The support of MapBySlice affords the following: +// - A slice type which implements MapBySlice will be encoded as a map +// - A slice can be decoded from a map in the stream +type MapBySlice interface { + MapBySlice() +} + +// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// +// BasicHandle encapsulates the common options and extension functions. +type BasicHandle struct { + // TypeInfos is used to get the type info for any type. + // + // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json + TypeInfos *TypeInfos + + extHandle + EncodeOptions + DecodeOptions + noBuiltInTypeChecker +} + +func (x *BasicHandle) getBasicHandle() *BasicHandle { + return x +} + +func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + if x.TypeInfos == nil { + return defTypeInfos.get(rtid, rt) + } + return x.TypeInfos.get(rtid, rt) +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + getBasicHandle() *BasicHandle + newEncDriver(w *Encoder) encDriver + newDecDriver(r *Decoder) decDriver + isBinary() bool + hasElemSeparators() bool + IsBuiltinType(rtid uintptr) bool +} + +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and store the raw bytes during decode. +// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set. +type Raw []byte + +// RawExt represents raw unprocessed extension data. +// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. +// +// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value. +type RawExt struct { + Tag uint64 + // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value. + // Data is used by codecs (e.g. 
binc, msgpack, simple) which do custom serialization of the types + Data []byte + // Value represents the extension, if Data is nil. + // Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. + Value interface{} +} + +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { + // WriteExt converts a value to a []byte. + // + // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. + WriteExt(v interface{}) []byte + + // ReadExt updates a value from a []byte. + ReadExt(dst interface{}, src []byte) +} + +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. +type InterfaceExt interface { + // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64. + // + // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. + ConvertExt(v interface{}) interface{} + + // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time. + UpdateExt(dst interface{}, src interface{}) +} + +// Ext handles custom (de)serialization of custom types / extensions. +type Ext interface { + BytesExt + InterfaceExt +} + +// addExtWrapper is a wrapper implementation to support former AddExt exported method. +type addExtWrapper struct { + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +func (x addExtWrapper) WriteExt(v interface{}) []byte { + bs, err := x.encFn(reflect.ValueOf(v)) + if err != nil { + panic(err) + } + return bs +} + +func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { + if err := x.decFn(reflect.ValueOf(v), bs); err != nil { + panic(err) + } +} + +func (x addExtWrapper) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) +} + +func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { + x.ReadExt(dest, v.([]byte)) +} + +type setExtWrapper struct { + b BytesExt + i InterfaceExt +} + +func (x *setExtWrapper) WriteExt(v interface{}) []byte { + if x.b == nil { + panic("BytesExt.WriteExt is not supported") + } + return x.b.WriteExt(v) +} + +func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) { + if x.b == nil { + panic("BytesExt.WriteExt is not supported") + + } + x.b.ReadExt(v, bs) +} + +func (x *setExtWrapper) ConvertExt(v interface{}) interface{} { + if x.i == nil { + panic("InterfaceExt.ConvertExt is not supported") + + } + return x.i.ConvertExt(v) +} + +func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) { + if x.i == nil { + panic("InterfaceExxt.UpdateExt is not supported") + + } + x.i.UpdateExt(dest, v) +} + +type binaryEncodingType struct{} + +func (_ binaryEncodingType) isBinary() bool { return true } + +type textEncodingType struct{} + +func (_ textEncodingType) isBinary() bool { return false } + +// noBuiltInTypes is embedded into many types which do not support builtins +// e.g. msgpack, simple, cbor. 
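To make the InterfaceExt contract defined above concrete, here is a sketch of an extension that stores a custom type as a plain integer and lets the format serialize that value. The Seconds type and the tag number are invented for the example, and the registration call (SetInterfaceExt) is the one the deprecation notes further down point users at:

```go
package main

import (
	"reflect"

	"github.com/ugorji/go/codec"
)

// Seconds is a made-up type we want to store compactly in the stream.
type Seconds struct{ N int64 }

// secondsExt implements InterfaceExt: convert to/from a simpler value
// (int64) and let the format (cbor here) serialize that value.
type secondsExt struct{}

func (secondsExt) ConvertExt(v interface{}) interface{} {
	switch x := v.(type) {
	case Seconds:
		return x.N
	case *Seconds: // v may be a pointer to the extension type
		return x.N
	}
	panic("secondsExt: unexpected type")
}

func (secondsExt) UpdateExt(dst interface{}, src interface{}) {
	s := dst.(*Seconds)
	switch x := src.(type) {
	case int64:
		s.N = x
	case uint64: // positive integers may decode as uint64
		s.N = int64(x)
	}
}

func main() {
	var h codec.CborHandle
	// Tag 78 is arbitrary for this sketch; pick any unused extension tag.
	if err := h.SetInterfaceExt(reflect.TypeOf(Seconds{}), 78, secondsExt{}); err != nil {
		panic(err)
	}

	var buf []byte
	_ = codec.NewEncoderBytes(&buf, &h).Encode(Seconds{N: 42})

	var out Seconds
	_ = codec.NewDecoderBytes(buf, &h).Decode(&out) // out.N == 42
}
```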
+ +type noBuiltInTypeChecker struct{} + +func (_ noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } + +type noBuiltInTypes struct{ noBuiltInTypeChecker } + +func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} +func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} + +// type noStreamingCodec struct{} +// func (_ noStreamingCodec) CheckBreak() bool { return false } +// func (_ noStreamingCodec) hasElemSeparators() bool { return false } + +type noElemSeparators struct{} + +func (_ noElemSeparators) hasElemSeparators() (v bool) { return } + +// bigenHelper. +// Users must already slice the x completely, because we will not reslice. +type bigenHelper struct { + x []byte // must be correctly sliced to appropriate len. slicing is a cost. + w encWriter +} + +func (z bigenHelper) writeUint16(v uint16) { + bigen.PutUint16(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint32(v uint32) { + bigen.PutUint32(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint64(v uint64) { + bigen.PutUint64(z.x, v) + z.w.writeb(z.x) +} + +type extTypeTagFn struct { + rtid uintptr + rt reflect.Type + tag uint64 + ext Ext +} + +type extHandle []extTypeTagFn + +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. +// +// AddExt registes an encode and decode function for a reflect.Type. +// AddExt internally calls SetExt. +// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. +func (o *extHandle) AddExt( + rt reflect.Type, tag byte, + encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error, +) (err error) { + if encfn == nil || decfn == nil { + return o.SetExt(rt, uint64(tag), nil) + } + return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) +} + +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. +// +// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To Deregister an ext, call SetExt with nil Ext +func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + rtid := rt2id(rt) + for _, v := range *o { + if v.rtid == rtid { + v.tag, v.ext = tag, ext + return + } + } + + if *o == nil { + *o = make([]extTypeTagFn, 0, 4) + } + *o = append(*o, extTypeTagFn{rtid, rt, tag, ext}) + return +} + +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + var v *extTypeTagFn + for i := range o { + v = &o[i] + if v.rtid == rtid { + return v + } + } + return nil +} + +func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { + var v *extTypeTagFn + for i := range o { + v = &o[i] + if v.tag == tag { + return v + } + } + return nil +} + +const maxLevelsEmbedding = 16 + +type structFieldInfo struct { + encName string // encode name + fieldName string // field name + + is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct + nis uint8 // num levels of embedding. if 1, then it's not embedded. + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? +} + +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if v, valid := si.field(v, false); valid { + v.Set(reflect.Zero(v.Type())) + } +} + +// rv returns the field of the struct. 
+// If anonymous, it returns an Invalid +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { + // replicate FieldByIndex + for i, x := range si.is { + if uint8(i) == si.nis { + break + } + if v, valid = baseStructRv(v, update); !valid { + return + } + v = v.Field(int(x)) + } + + return v, true +} + +func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { + v, _ = si.field(v, update) + return v +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + // if fname == "" { + // panic(noFieldNameToStructFieldInfoErr) + // } + si := structFieldInfo{ + encName: fname, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + if s == "omitempty" { + si.omitEmpty = true + } else if s == "toarray" { + si.toArray = true + } + } + } + } + // si.encNameBs = []byte(si.encName) + return &si +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +const structFieldNodeNumToCache = 4 + +type structFieldNodeCache struct { + rv [structFieldNodeNumToCache]reflect.Value + idx [structFieldNodeNumToCache]uint32 + num uint8 +} + +func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { + // defer func() { fmt.Printf(">>>> found in cache2? %v\n", valid) }() + for i, k := range &x.idx { + if uint8(i) == x.num { + return // break + } + if key == k { + return x.rv[i], true + } + } + return +} + +func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { + if x.num < structFieldNodeNumToCache { + x.rv[x.num] = fv + x.idx[x.num] = key + x.num++ + return + } +} + +type structFieldNode struct { + v reflect.Value + cache2 structFieldNodeCache + cache3 structFieldNodeCache + update bool +} + +func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { + // return si.fieldval(x.v, x.update) + // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding + // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. + var valid bool + switch si.nis { + case 1: + fv = x.v.Field(int(si.is[0])) + case 2: + if fv, valid = x.cache2.get(uint32(si.is[0])); valid { + fv = fv.Field(int(si.is[1])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache2.tryAdd(fv, uint32(si.is[0])) + fv = fv.Field(int(si.is[1])) + case 3: + var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) + if fv, valid = x.cache3.get(key); valid { + fv = fv.Field(int(si.is[2])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + fv = fv.Field(int(si.is[1])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache3.tryAdd(fv, key) + fv = fv.Field(int(si.is[2])) + default: + fv, _ = si.field(x.v, x.update) + } + return +} + +func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v, true +} + +// typeInfo keeps information about each type referenced in the encode/decode sequence. 
+// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + rt reflect.Type + rtid uintptr + // rv0 reflect.Value // saved zero value, used if immutableKind + + numMeth uint16 // number of methods + + // baseId gives pointer to the base reflect.Type, after deferencing + // the pointers. E.g. base type of ***time.Time is time.Time. + base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + anyOmitEmpty bool + + mbs bool // base type (T or *T) is a MapBySlice + + bm bool // base type (T or *T) is a binaryMarshaler + bunm bool // base type (T or *T) is a binaryUnmarshaler + bmIndir int8 // number of indirections to get to binaryMarshaler type + bunmIndir int8 // number of indirections to get to binaryUnmarshaler type + + tm bool // base type (T or *T) is a textMarshaler + tunm bool // base type (T or *T) is a textUnmarshaler + tmIndir int8 // number of indirections to get to textMarshaler type + tunmIndir int8 // number of indirections to get to textUnmarshaler type + + jm bool // base type (T or *T) is a jsonMarshaler + junm bool // base type (T or *T) is a jsonUnmarshaler + jmIndir int8 // number of indirections to get to jsonMarshaler type + junmIndir int8 // number of indirections to get to jsonUnmarshaler type + + cs bool // base type (T or *T) is a Selfer + csIndir int8 // number of indirections to get to Selfer type + + toArray bool // whether this (struct) type should be encoded as an array +} + +// define length beyond which we do a binary search instead of a linear search. +// From our testing, linear search seems faster than binary search up to 16-field structs. +// However, we set to 8 similar to what python does for hashtables. +const indexForEncNameBinarySearchThreshold = 8 + +func (ti *typeInfo) indexForEncName(name string) int { + // NOTE: name may be a stringView, so don't pass it to another function. + //tisfi := ti.sfi + sfilen := len(ti.sfi) + if sfilen < indexForEncNameBinarySearchThreshold { + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + return -1 + } + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + return -1 +} + +type rtid2ti struct { + rtid uintptr + ti *typeInfo +} + +// TypeInfos caches typeInfo for each type on first inspection. +// +// It is configured with a set of tag keys, which are used to get +// configuration for the type. +type TypeInfos struct { + infos atomicTypeInfoSlice // formerly map[uintptr]*typeInfo, now *[]rtid2ti + mu sync.Mutex + tags []string +} + +// NewTypeInfos creates a TypeInfos given a set of struct tags keys. +// +// This allows users customize the struct tag keys which contain configuration +// of their types. 
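NewTypeInfos, directly below, is the hook for that customization, and the TypeInfos field on BasicHandle is where the result goes. A short sketch of pointing a handle at an additional struct-tag key (the "db" tag is invented for the example):

```go
package main

import "github.com/ugorji/go/codec"

type User struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

func main() {
	var h codec.JsonHandle
	// Consult the "db" tag first, then fall back to the defaults.
	h.TypeInfos = codec.NewTypeInfos([]string{"db", "codec", "json"})

	var buf []byte
	_ = codec.NewEncoderBytes(&buf, &h).Encode(User{ID: 7, Name: "a"})
	// The encoded map keys now come from the db tag ("id", "name"),
	// not the Go field names.
}
```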
+func NewTypeInfos(tags []string) *TypeInfos { + return &TypeInfos{tags: tags} +} + +func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { + // check for tags: codec, json, in that order. + // this allows seamless support for many configured structs. + for _, x := range x.tags { + s = t.Get(x) + if s != "" { + return s + } + } + return +} + +func (x *TypeInfos) find(sp *[]rtid2ti, rtid uintptr) (idx int, ti *typeInfo) { + // binary search. adapted from sort/search.go. + // if sp == nil { + // return -1, nil + // } + s := *sp + h, i, j := 0, 0, len(s) + for i < j { + h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < len(s) && s[i].rtid == rtid { + return i, s[i].ti + } + return i, nil +} + +func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + sp := x.infos.load() + var idx int + if sp != nil { + idx, pti = x.find(sp, rtid) + if pti != nil { + return + } + } + + // do not hold lock while computing this. + // it may lead to duplication, but that's ok. + ti := typeInfo{rt: rt, rtid: rtid} + // ti.rv0 = reflect.Zero(rt) + + ti.numMeth = uint16(rt.NumMethod()) + var ok bool + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.bm, ti.bmIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.bunm, ti.bunmIndir = true, indir + } + if ok, indir = implementsIntf(rt, textMarshalerTyp); ok { + ti.tm, ti.tmIndir = true, indir + } + if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok { + ti.tunm, ti.tunmIndir = true, indir + } + if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok { + ti.jm, ti.jmIndir = true, indir + } + if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok { + ti.junm, ti.junmIndir = true, indir + } + if ok, indir = implementsIntf(rt, selferTyp); ok { + ti.cs, ti.csIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } + + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + ti.base = pt + ti.baseId = rt2id(pt) + ti.baseIndir = ptIndir + } + + if rt.Kind() == reflect.Struct { + var omitEmpty bool + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) + ti.toArray = siInfo.toArray + omitEmpty = siInfo.omitEmpty + } + pp, pi := pool.tiLoad() + pv := pi.(*typeInfoLoadArray) + pv.etypes[0] = ti.baseId + vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + x.rget(rt, rtid, omitEmpty, nil, &vv) + ti.sfip, ti.sfi, ti.anyOmitEmpty = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) + pp.Put(pi) + } + // sfi = sfip + + var vs []rtid2ti + x.mu.Lock() + sp = x.infos.load() + if sp == nil { + pti = &ti + vs = []rtid2ti{{rtid, pti}} + x.infos.store(&vs) + } else { + idx, pti = x.find(sp, rtid) + if pti == nil { + s := *sp + pti = &ti + vs = make([]rtid2ti, len(s)+1) + copy(vs, s[:idx]) + vs[idx] = rtid2ti{rtid, pti} + copy(vs[idx+1:], s[idx:]) + x.infos.store(&vs) + } + } + x.mu.Unlock() + return +} + +func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, + indexstack []uint16, pv *typeInfoLoad, +) { + // Read up fields and store how to access the value. + // + // It uses go's rules for message selectors, + // which say that the field with the shallowest depth is selected. 
+ // + // Note: we consciously use slices, not a map, to simulate a set. + // Typically, types have < 16 fields, + // and iteration using equals is faster than maps there + flen := rt.NumField() + if flen > (1< maxLevelsEmbedding-1 { + panic(fmt.Errorf("codec: only supports up to %v depth of embedding - type has %v depth", maxLevelsEmbedding-1, len(indexstack))) + } + si.nis = uint8(len(indexstack)) + 1 + copy(si.is[:], indexstack) + si.is[len(indexstack)] = j + + if omitEmpty { + si.omitEmpty = true + } + pv.sfis = append(pv.sfis, si) + } +} + +// resolves the struct field info got from a call to rget. +// Returns a trimmed, unsorted and sorted []*structFieldInfo. +func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo, anyOmitEmpty bool) { + var n int + for i, v := range x { + xn := v.encName // TODO: fieldName or encName? use encName for now. + var found bool + for j, k := range pv { + if k.name == xn { + // one of them must be reset to nil, and the index updated appropriately to the other one + if v.nis == x[k.index].nis { + } else if v.nis < x[k.index].nis { + pv[j].index = i + if x[k.index] != nil { + x[k.index] = nil + n++ + } + } else { + if x[i] != nil { + x[i] = nil + n++ + } + } + found = true + break + } + } + if !found { + pv = append(pv, sfiIdx{xn, i}) + } + } + + // remove all the nils + y = make([]*structFieldInfo, len(x)-n) + n = 0 + for _, v := range x { + if v == nil { + continue + } + if !anyOmitEmpty && v.omitEmpty { + anyOmitEmpty = true + } + y[n] = v + n++ + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + return +} + +func xprintf(format string, a ...interface{}) { + if xDebug { + fmt.Fprintf(os.Stderr, format, a...) + } +} + +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + // if false && xDebug { + // fmt.Printf("panic'ing with: %v\n", x) + // debug.PrintStack() + // } + panicValToErr(x, err) + } + } +} + +func panicToErrs2(err1, err2 *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + panicValToErr(x, err1) + panicValToErr(x, err2) + } + } +} + +// func doPanic(tag string, format string, params ...interface{}) { +// params2 := make([]interface{}, len(params)+1) +// params2[0] = tag +// copy(params2[1:], params) +// panic(fmt.Errorf("%s: "+format, params2...)) +// } + +func isImmutableKind(k reflect.Kind) (v bool) { + return immutableKindsSet[k] + // return false || + // k == reflect.Int || + // k == reflect.Int8 || + // k == reflect.Int16 || + // k == reflect.Int32 || + // k == reflect.Int64 || + // k == reflect.Uint || + // k == reflect.Uint8 || + // k == reflect.Uint16 || + // k == reflect.Uint32 || + // k == reflect.Uint64 || + // k == reflect.Uintptr || + // k == reflect.Float32 || + // k == reflect.Float64 || + // k == reflect.Bool || + // k == reflect.String +} + +// ---- + +type codecFnInfo struct { + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType + addr bool +} + +// codecFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. 
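The structInfoFieldName ("_struct") lookup in TypeInfos.get above is how a struct opts into type-wide options, and parseStructFieldInfo recognizes the same omitempty/toarray keywords on it as on ordinary fields. A small sketch of both conventions (type and field names invented):

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// Point opts into array encoding: the toarray option on _struct makes the
// struct encode as a sequence of field values instead of a key/value map.
type Point struct {
	_struct bool `codec:",toarray"`
	X, Y    int
}

// Options applies omitempty to every field in one place.
type Options struct {
	_struct bool   `codec:",omitempty"`
	Debug   bool   `codec:"debug"`
	Label   string `codec:"label"`
}

func main() {
	var h codec.JsonHandle
	var buf []byte
	_ = codec.NewEncoderBytes(&buf, &h).Encode(Point{X: 1, Y: 2})
	fmt.Println(string(buf)) // array form, e.g. [1,2], rather than a field map
}
```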
+type codecFn struct { + i codecFnInfo + fe func(*Encoder, *codecFnInfo, reflect.Value) + fd func(*Decoder, *codecFnInfo, reflect.Value) +} + +type codecRtidFn struct { + rtid uintptr + fn codecFn +} + +type codecFner struct { + hh Handle + h *BasicHandle + cs [arrayCacheLen]*[arrayCacheLen]codecRtidFn + s []*[arrayCacheLen]codecRtidFn + sn uint32 + be bool + js bool + cf [arrayCacheLen]codecRtidFn +} + +func (c *codecFner) reset(hh Handle) { + c.hh = hh + c.h = hh.getBasicHandle() + _, c.js = hh.(*JsonHandle) + c.be = hh.isBinary() +} + +func (c *codecFner) get(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) { + rtid := rt2id(rt) + var j uint32 + var sn uint32 = c.sn + if sn == 0 { + c.s = c.cs[:1] + c.s[0] = &c.cf + c.cf[0].rtid = rtid + fn = &(c.cf[0].fn) + c.sn = 1 + } else { + LOOP1: + for _, x := range c.s { + for i := range x { + if j == sn { + break LOOP1 + } + if x[i].rtid == rtid { + fn = &(x[i].fn) + return + } + j++ + } + } + sx, sy := sn/arrayCacheLen, sn%arrayCacheLen + if sy == 0 { + c.s = append(c.s, &[arrayCacheLen]codecRtidFn{}) + } + c.s[sx][sy].rtid = rtid + fn = &(c.s[sx][sy].fn) + c.sn++ + } + + ti := c.h.getTypeInfo(rtid, rt) + fi := &(fn.i) + fi.ti = ti + + if checkCodecSelfer && ti.cs { + fn.fe = (*Encoder).selferMarshal + fn.fd = (*Decoder).selferUnmarshal + } else if rtid == rawTypId { + fn.fe = (*Encoder).raw + fn.fd = (*Decoder).raw + } else if rtid == rawExtTypId { + fn.fe = (*Encoder).rawExt + fn.fd = (*Decoder).rawExt + fn.i.addr = true + } else if c.hh.IsBuiltinType(rtid) { + fn.fe = (*Encoder).builtin + fn.fd = (*Decoder).builtin + fn.i.addr = true + } else if xfFn := c.h.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*Encoder).ext + fn.fd = (*Decoder).ext + fn.i.addr = true + } else if supportMarshalInterfaces && c.be && ti.bm { + fn.fe = (*Encoder).binaryMarshal + fn.fd = (*Decoder).binaryUnmarshal + } else if supportMarshalInterfaces && !c.be && c.js && ti.jm { + //If JSON, we should check JSONMarshal before textMarshal + fn.fe = (*Encoder).jsonMarshal + fn.fd = (*Decoder).jsonUnmarshal + } else if supportMarshalInterfaces && !c.be && ti.tm { + fn.fe = (*Encoder).textMarshal + fn.fd = (*Decoder).textUnmarshal + } else { + rk := rt.Kind() + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if rt.PkgPath() == "" { // un-named slice or map + if idx := fastpathAV.index(rtid); idx != -1 { + fn.fe = fastpathAV[idx].encfn + fn.fd = fastpathAV[idx].decfn + fn.i.addr = true + } + } else { + // use mapping for underlying type if there + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(rt.Key(), rt.Elem()) + } else { + rtu = reflect.SliceOf(rt.Elem()) + } + rtuid := rt2id(rtu) + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf(e, xf, xrv.Convert(xrt)) + } + fn.i.addr = true + xfnf2 := fastpathAV[idx].decfn + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) + } + } + } + } + if fn.fe == nil && fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fe = (*Encoder).kBool + fn.fd = (*Decoder).kBool + case reflect.String: + fn.fe = (*Encoder).kString + fn.fd = (*Decoder).kString + case reflect.Int: + fn.fd = (*Decoder).kInt + fn.fe = (*Encoder).kInt + case reflect.Int8: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt8 + case reflect.Int16: + fn.fe = (*Encoder).kInt + 
fn.fd = (*Decoder).kInt16 + case reflect.Int32: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt32 + case reflect.Int64: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt64 + case reflect.Uint: + fn.fd = (*Decoder).kUint + fn.fe = (*Encoder).kUint + case reflect.Uint8: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint8 + case reflect.Uint16: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint16 + case reflect.Uint32: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint32 + case reflect.Uint64: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint64 + // case reflect.Ptr: + // fn.fd = (*Decoder).kPtr + case reflect.Uintptr: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUintptr + case reflect.Float32: + fn.fe = (*Encoder).kFloat32 + fn.fd = (*Decoder).kFloat32 + case reflect.Float64: + fn.fe = (*Encoder).kFloat64 + fn.fd = (*Decoder).kFloat64 + case reflect.Invalid: + fn.fe = (*Encoder).kInvalid + case reflect.Chan: + fi.seq = seqTypeChan + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.fe = (*Encoder).kSlice + fi.addr = false + rt2 := reflect.SliceOf(rt.Elem()) + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + // println(">>>>>> decoding an array ... ") + d.cf.get(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) + // println(">>>>>> decoding an array ... DONE") + } + // fn.fd = (*Decoder).kArray + case reflect.Struct: + if ti.anyOmitEmpty { + fn.fe = (*Encoder).kStruct + } else { + fn.fe = (*Encoder).kStructNoOmitempty + } + fn.fd = (*Decoder).kStruct + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + // case reflect.Ptr: + // fn.fe = (*Encoder).kPtr + // case reflect.Interface: + // fn.fe = (*Encoder).kInterface + case reflect.Map: + fn.fe = (*Encoder).kMap + fn.fd = (*Decoder).kMap + case reflect.Interface: + // encode: reflect.Interface are handled already by preEncodeValue + fn.fd = (*Decoder).kInterface + default: + fn.fe = (*Encoder).kErr + fn.fd = (*Decoder).kErr + } + } + } + + return +} + +// ---- + +// these functions must be inlinable, and not call anybody +type checkOverflow struct{} + +func (_ checkOverflow) Float32(f float64) (overflow bool) { + if f < 0 { + f = -f + } + return math.MaxFloat32 < f && f <= math.MaxFloat64 +} + +func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} + +func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} + +func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) { + //e.g. 
-127 to 128 for int8 + pos := (v >> 63) == 0 + ui2 := v & 0x7fffffffffffffff + if pos { + if ui2 > math.MaxInt64 { + overflow = true + return + } + } else { + if ui2 > math.MaxInt64-1 { + overflow = true + return + } + } + i = int64(v) + return +} + +// ------------------ SORT ----------------- + +func isNaN(f float64) bool { return f != f } + +// ----------------------- + +type intSlice []int64 +type uintSlice []uint64 +type uintptrSlice []uintptr +type floatSlice []float64 +type boolSlice []bool +type stringSlice []string +type bytesSlice [][]byte + +func (p intSlice) Len() int { return len(p) } +func (p intSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintSlice) Len() int { return len(p) } +func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintptrSlice) Len() int { return len(p) } +func (p uintptrSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p uintptrSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatSlice) Len() int { return len(p) } +func (p floatSlice) Less(i, j int) bool { + return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) +} +func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringSlice) Len() int { return len(p) } +func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesSlice) Len() int { return len(p) } +func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 } +func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolSlice) Len() int { return len(p) } +func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] } +func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// --------------------- + +type intRv struct { + v int64 + r reflect.Value +} +type intRvSlice []intRv +type uintRv struct { + v uint64 + r reflect.Value +} +type uintRvSlice []uintRv +type floatRv struct { + v float64 + r reflect.Value +} +type floatRvSlice []floatRv +type boolRv struct { + v bool + r reflect.Value +} +type boolRvSlice []boolRv +type stringRv struct { + v string + r reflect.Value +} +type stringRvSlice []stringRv +type bytesRv struct { + v []byte + r reflect.Value +} +type bytesRvSlice []bytesRv + +func (p intRvSlice) Len() int { return len(p) } +func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintRvSlice) Len() int { return len(p) } +func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatRvSlice) Len() int { return len(p) } +func (p floatRvSlice) Less(i, j int) bool { + return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v) +} +func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringRvSlice) Len() int { return len(p) } +func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesRvSlice) Len() int { return len(p) } +func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolRvSlice) Len() int { return len(p) } +func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v } +func (p boolRvSlice) Swap(i, j int) { p[i], p[j] 
= p[j], p[i] } + +// ----------------- + +type bytesI struct { + v []byte + i interface{} +} + +type bytesISlice []bytesI + +func (p bytesISlice) Len() int { return len(p) } +func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type set []uintptr + +func (s *set) add(v uintptr) (exists bool) { + // e.ci is always nil, or len >= 1 + x := *s + if x == nil { + x = make([]uintptr, 1, 8) + x[0] = v + *s = x + return + } + // typically, length will be 1. make this perform. + if len(x) == 1 { + if j := x[0]; j == 0 { + x[0] = v + } else if j == v { + exists = true + } else { + x = append(x, v) + *s = x + } + return + } + // check if it exists + for _, j := range x { + if j == v { + exists = true + return + } + } + // try to replace a "deleted" slot + for i, j := range x { + if j == 0 { + x[i] = v + return + } + } + // if unable to replace deleted slot, just append it. + x = append(x, v) + *s = x + return +} + +func (s *set) remove(v uintptr) (exists bool) { + x := *s + if len(x) == 0 { + return + } + if len(x) == 1 { + if x[0] == v { + x[0] = 0 + } + return + } + for i, j := range x { + if j == v { + exists = true + x[i] = 0 // set it to 0, as way to delete it. + // copy(x[i:], x[i+1:]) + // x = x[:len(x)-1] + return + } + } + return +} + +// ------ + +// bitset types are better than [256]bool, because they permit the whole +// bitset array being on a single cache line and use less memory. + +// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). +// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 + +type bitset256 [32]byte + +func (x *bitset256) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} +func (x *bitset256) unset(pos byte) { + x[pos>>3] &^= (1 << (pos & 7)) +} +func (x *bitset256) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +type bitset128 [16]byte + +func (x *bitset128) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} +func (x *bitset128) unset(pos byte) { + x[pos>>3] &^= (1 << (pos & 7)) +} +func (x *bitset128) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +// ------------ + +type pooler struct { + // for stringRV + strRv8, strRv16, strRv32, strRv64, strRv128 sync.Pool + // for the decNaked + dn sync.Pool + tiload sync.Pool +} + +func (p *pooler) init() { + p.strRv8.New = func() interface{} { return new([8]stringRv) } + p.strRv16.New = func() interface{} { return new([16]stringRv) } + p.strRv32.New = func() interface{} { return new([32]stringRv) } + p.strRv64.New = func() interface{} { return new([64]stringRv) } + p.strRv128.New = func() interface{} { return new([128]stringRv) } + p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } + p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } +} + +func (p *pooler) stringRv8() (sp *sync.Pool, v interface{}) { + return &p.strRv8, p.strRv8.Get() +} +func (p *pooler) stringRv16() (sp *sync.Pool, v interface{}) { + return &p.strRv16, p.strRv16.Get() +} +func (p *pooler) stringRv32() (sp *sync.Pool, v interface{}) { + return &p.strRv32, p.strRv32.Get() +} +func (p *pooler) stringRv64() (sp *sync.Pool, v interface{}) { + return &p.strRv64, p.strRv64.Get() +} +func (p *pooler) stringRv128() (sp *sync.Pool, v interface{}) { + return &p.strRv128, p.strRv128.Get() +} +func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { + return &p.dn, p.dn.Get() +} +func (p *pooler) 
tiLoad() (sp *sync.Pool, v interface{}) { + return &p.tiload, p.tiload.Get() +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_internal.go new file mode 100644 index 00000000..eb18e2cc --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_internal.go @@ -0,0 +1,221 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). + +import ( + "errors" + "fmt" + "math" + "reflect" +) + +func panicValToErr(panicVal interface{}, err *error) { + if panicVal == nil { + return + } + // case nil + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + return +} + +func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return hIsEmptyValue(v.Elem(), deref, checkStruct) + } else { + return v.IsNil() + } + case reflect.Struct: + if !checkStruct { + return false + } + // return true if all fields are empty. else return false. + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. + // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !hIsEmptyValue(v.Field(i), deref, checkStruct) { + return false + } + } + return true + } + return false +} + +func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + return hIsEmptyValue(v, deref, checkStruct) +} + +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir + } + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? + if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 +} + +// validate that this function is correct ... 
+// culled from OGRE (Object-Oriented Graphics Rendering Engine) +// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) +func halfFloatToFloatBits(yy uint16) (d uint32) { + y := uint32(yy) + s := (y >> 15) & 0x01 + e := (y >> 10) & 0x1f + m := y & 0x03ff + + if e == 0 { + if m == 0 { // plu or minus 0 + return s << 31 + } else { // Denormalized number -- renormalize it + for (m & 0x00000400) == 0 { + m <<= 1 + e -= 1 + } + e += 1 + const zz uint32 = 0x0400 + m &= ^zz + } + } else if e == 31 { + if m == 0 { // Inf + return (s << 31) | 0x7f800000 + } else { // NaN + return (s << 31) | 0x7f800000 | (m << 13) + } + } + e = e + (127 - 15) + m = m << 13 + return (s << 31) | (e << 23) | m +} + +// GrowCap will return a new capacity for a slice, given the following: +// - oldCap: current capacity +// - unit: in-memory size of an element +// - num: number of elements to add +func growCap(oldCap, unit, num int) (newCap int) { + // appendslice logic (if cap < 1024, *2, else *1.25): + // leads to many copy calls, especially when copying bytes. + // bytes.Buffer model (2*cap + n): much better for bytes. + // smarter way is to take the byte-size of the appended element(type) into account + + // maintain 3 thresholds: + // t1: if cap <= t1, newcap = 2x + // t2: if cap <= t2, newcap = 1.75x + // t3: if cap <= t3, newcap = 1.5x + // else newcap = 1.25x + // + // t1, t2, t3 >= 1024 always. + // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) + // + // With this, appending for bytes increase by: + // 100% up to 4K + // 75% up to 8K + // 50% up to 16K + // 25% beyond that + + // unit can be 0 e.g. for struct{}{}; handle that appropriately + var t1, t2, t3 int // thresholds + if unit <= 1 { + t1, t2, t3 = 4*1024, 8*1024, 16*1024 + } else if unit < 16 { + t3 = 16 / unit * 1024 + t1 = t3 * 1 / 4 + t2 = t3 * 2 / 4 + } else { + t1, t2, t3 = 1024, 1024, 1024 + } + + var x int // temporary variable + + // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively + if oldCap <= t1 { // [0,t1] + x = 8 + } else if oldCap > t3 { // (t3,infinity] + x = 5 + } else if oldCap <= t2 { // (t1,t2] + x = 7 + } else { // (t2,t3] + x = 6 + } + newCap = x * oldCap / 4 + + if num > 0 { + newCap += num + } + + // ensure newCap is a multiple of 64 (if it is > 64) or 16. + if newCap > 64 { + if x = newCap % 64; x != 0 { + x = newCap / 64 + newCap = 64 * (x + 1) + } + } else { + if x = newCap % 16; x != 0 { + x = newCap / 16 + newCap = 16 * (x + 1) + } + } + return +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go new file mode 100644 index 00000000..9d9d048e --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go @@ -0,0 +1,156 @@ +// +build !go1.7 safe appengine + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" + "sync/atomic" +) + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. 
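A quick worked pass through the growCap thresholds above, assuming unit = 1 (a plain []byte): t1, t2 and t3 come out to 4096, 8192 and 16384. Growing a 3000-cap buffer by 10 elements takes the 2x branch, giving 8*3000/4 + 10 = 6010, which rounds up to 6016 (the next multiple of 64). A 10000-cap buffer falls in the (t2, t3] band, so it grows by 50% to 15000, rounded up to 15040; past 16384 the growth rate drops to 25%.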
+func stringView(v []byte) string { + return string(v) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. +func bytesView(v string) []byte { + return []byte(v) +} + +func definitelyNil(v interface{}) bool { + return false + // rv := reflect.ValueOf(v) + // switch rv.Kind() { + // case reflect.Invalid: + // return true + // case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func: + // return rv.IsNil() + // default: + // return false + // } +} + +// // keepAlive4BytesView maintains a reference to the input parameter for bytesView. +// // +// // Usage: call this at point where done with the bytes view. +// func keepAlive4BytesView(v string) {} + +// // keepAlive4BytesView maintains a reference to the input parameter for stringView. +// // +// // Usage: call this at point where done with the string view. +// func keepAlive4StringView(v []byte) {} + +func rv2i(rv reflect.Value) interface{} { + return rv.Interface() +} + +func rt2id(rt reflect.Type) uintptr { + return reflect.ValueOf(rt).Pointer() +} + +func rv2rtid(rv reflect.Value) uintptr { + return reflect.ValueOf(rv.Type()).Pointer() +} + +// -------------------------- +// type ptrToRvMap struct{} + +// func (_ *ptrToRvMap) init() {} +// func (_ *ptrToRvMap) get(i interface{}) reflect.Value { +// return reflect.ValueOf(i).Elem() +// } + +// -------------------------- +type atomicTypeInfoSlice struct { + v atomic.Value +} + +func (x *atomicTypeInfoSlice) load() *[]rtid2ti { + i := x.v.Load() + if i == nil { + return nil + } + return i.(*[]rtid2ti) +} + +func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) { + x.v.Store(p) +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + rv.SetBytes(d.rawBytes()) +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + rv.SetString(d.d.DecodeString()) +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + rv.SetBool(d.d.DecodeBool()) +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat(true)) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat(false)) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(64)) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + 
rv.SetUint(d.d.DecodeUint(64)) +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_unsafe.go new file mode 100644 index 00000000..c7c01b81 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_unsafe.go @@ -0,0 +1,418 @@ +// +build !safe +// +build !appengine +// +build go1.7 + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +// This file has unsafe variants of some helper methods. +// NOTE: See helper_not_unsafe.go for the usage information. + +// var zeroRTv [4]uintptr + +const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go + +type unsafeString struct { + Data uintptr + Len int +} + +type unsafeSlice struct { + Data uintptr + Len int + Cap int +} + +type unsafeIntf struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +type unsafeReflectValue struct { + typ unsafe.Pointer + ptr unsafe.Pointer + flag uintptr +} + +func stringView(v []byte) string { + if len(v) == 0 { + return "" + } + + bx := (*unsafeSlice)(unsafe.Pointer(&v)) + sx := unsafeString{bx.Data, bx.Len} + return *(*string)(unsafe.Pointer(&sx)) +} + +func bytesView(v string) []byte { + if len(v) == 0 { + return zeroByteSlice + } + + sx := (*unsafeString)(unsafe.Pointer(&v)) + bx := unsafeSlice{sx.Data, sx.Len, sx.Len} + return *(*[]byte)(unsafe.Pointer(&bx)) +} + +func definitelyNil(v interface{}) bool { + return (*unsafeIntf)(unsafe.Pointer(&v)).word == nil +} + +// func keepAlive4BytesView(v string) { +// runtime.KeepAlive(v) +// } + +// func keepAlive4StringView(v []byte) { +// runtime.KeepAlive(v) +// } + +// TODO: consider a more generally-known optimization for reflect.Value ==> Interface +// +// Currently, we use this fragile method that taps into implememtation details from +// the source go stdlib reflect/value.go, +// and trims the implementation. 
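The unsafe stringView and bytesView above return views that alias the caller's memory, which is exactly why the safe variants' doc comments insist on keeping the source alive while the view is in use. A standalone sketch of the same reinterpretation idea (simplified; the vendored stringView also special-cases empty input and uses explicit header structs):

```go
package main

import (
	"fmt"
	"unsafe"
)

// view reinterprets the []byte header as a string header, with no
// allocation or copy -- the same idea as stringView in unsafe mode.
func view(v []byte) string {
	return *(*string)(unsafe.Pointer(&v))
}

func main() {
	buf := []byte("hello")
	s := view(buf)

	buf[0] = 'j'
	fmt.Println(s) // "jello": the string aliases buf's backing array

	// With the safe build (helper_not_unsafe.go), stringView copies,
	// so the string would still read "hello" after the same mutation.
}
```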
+func rv2i(rv reflect.Value) interface{} { + if false { + return rv.Interface() + } + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + // references that are single-words (map, ptr) may be double-referenced as flagIndir + kk := urv.flag & (1<<5 - 1) + if (kk == uintptr(reflect.Map) || kk == uintptr(reflect.Ptr)) && urv.flag&unsafeFlagIndir != 0 { + return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) + } + return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +} + +func rt2id(rt reflect.Type) uintptr { + return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +} + +func rv2rtid(rv reflect.Value) uintptr { + return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ) +} + +// func rv0t(rt reflect.Type) reflect.Value { +// ut := (*unsafeIntf)(unsafe.Pointer(&rt)) +// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr +// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())} +// return *(*reflect.Value)(unsafe.Pointer(&uv}) +// } + +// -------------------------- +type atomicTypeInfoSlice struct { + v unsafe.Pointer +} + +func (x *atomicTypeInfoSlice) load() *[]rtid2ti { + return (*[]rtid2ti)(atomic.LoadPointer(&x.v)) +} + +func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) { + atomic.StorePointer(&x.v, unsafe.Pointer(p)) +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + // if urv.flag&unsafeFlagIndir != 0 { + // urv.ptr = *(*unsafe.Pointer)(urv.ptr) + // } + *(*[]byte)(urv.ptr) = d.rawBytes() +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*string)(urv.ptr) = d.d.DecodeString() +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*bool)(urv.ptr) = d.d.DecodeBool() +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float32)(urv.ptr) = float32(d.d.DecodeFloat(true)) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float64)(urv.ptr) = d.d.DecodeFloat(false) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int)(urv.ptr) = int(d.d.DecodeInt(intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int8)(urv.ptr) = int8(d.d.DecodeInt(8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int16)(urv.ptr) = int16(d.d.DecodeInt(16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int32)(urv.ptr) = int32(d.d.DecodeInt(32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int64)(urv.ptr) = d.d.DecodeInt(64) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint)(urv.ptr) = uint(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uintptr)(urv.ptr) = uintptr(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + 
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint8)(urv.ptr) = uint8(d.d.DecodeUint(8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint16)(urv.ptr) = uint16(d.d.DecodeUint(16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint32)(urv.ptr) = uint32(d.d.DecodeUint(32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint64)(urv.ptr) = d.d.DecodeUint(64) +} + +// ------------ + +// func rt2id(rt reflect.Type) uintptr { +// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +// // var i interface{} = rt +// // // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word +// } + +// func rv2i(rv reflect.Value) interface{} { +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // non-reference type: already indir +// // reference type: depend on flagIndir property ('cos maybe was double-referenced) +// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 ) +// // rvk := reflect.Kind(urv.flag & (1<<5 - 1)) +// // if (rvk == reflect.Chan || +// // rvk == reflect.Func || +// // rvk == reflect.Interface || +// // rvk == reflect.Map || +// // rvk == reflect.Ptr || +// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } +// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } +// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } + +// const ( +// unsafeRvFlagKindMask = 1<<5 - 1 +// unsafeRvKindDirectIface = 1 << 5 +// unsafeRvFlagIndir = 1 << 7 +// unsafeRvFlagAddr = 1 << 8 +// unsafeRvFlagMethod = 1 << 9 + +// _USE_RV_INTERFACE bool = false +// _UNSAFE_RV_DEBUG = true +// ) + +// type unsafeRtype struct { +// _ [2]uintptr +// _ uint32 +// _ uint8 +// _ uint8 +// _ uint8 +// kind uint8 +// _ [2]uintptr +// _ int32 +// } + +// func _rv2i(rv reflect.Value) interface{} { +// // Note: From use, +// // - it's never an interface +// // - the only calls here are for ifaceIndir types. +// // (though that conditional is wrong) +// // To know for sure, we need the value of t.kind (which is not exposed). +// // +// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct) +// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string +// // - Type Direct, Value indirect: ==> map??? +// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map +// // +// // TRANSLATES TO: +// // if typeIndirect { } else if valueIndirect { } else { } +// // +// // Since we don't deal with funcs, then "flagNethod" is unset, and can be ignored. 
+ +// if _USE_RV_INTERFACE { +// return rv.Interface() +// } +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + +// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS flag method or interface: delegating to rv.Interface()") +// // return rv.Interface() +// // } + +// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS Interface: delegate to rv.Interface") +// // return rv.Interface() +// // } +// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 { +// // if urv.flag&unsafeRvFlagAddr == 0 { +// // println("***** IS ifaceIndir typ") +// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ} +// // // return *(*interface{})(unsafe.Pointer(&ui)) +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // } else if urv.flag&unsafeRvFlagIndir != 0 { +// // println("***** IS flagindir") +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } else { +// // println("***** NOT flagindir") +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // println("***** default: delegate to rv.Interface") + +// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ)) +// if _UNSAFE_RV_DEBUG { +// fmt.Printf(">>>> start: %v: ", rv.Type()) +// fmt.Printf("%v - %v\n", *urv, *urt) +// } +// if urt.kind&unsafeRvKindDirectIface == 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type()) +// } +// // println("***** IS ifaceIndir typ") +// // if true || urv.flag&unsafeRvFlagAddr == 0 { +// // // println(" ***** IS NOT addr") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +flagIndir type: %v\n", rv.Type()) +// } +// // println("***** IS flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } else { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** -flagIndir type: %v\n", rv.Type()) +// } +// // println("***** NOT flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } +// // println("***** default: delegating to rv.Interface()") +// // return rv.Interface() +// } + +// var staticM0 = make(map[string]uint64) +// var staticI0 = (int32)(-5) + +// func staticRv2iTest() { +// i0 := (int32)(-5) +// m0 := make(map[string]uint16) +// m0["1"] = 1 +// for _, i := range []interface{}{ +// (int)(7), +// (uint)(8), +// (int16)(-9), +// (uint16)(19), +// (uintptr)(77), +// (bool)(true), +// float32(-32.7), +// float64(64.9), +// complex(float32(19), 5), +// complex(float64(-32), 7), +// [4]uint64{1, 2, 3, 4}, +// (chan<- int)(nil), // chan, +// rv2i, // func +// io.Writer(ioutil.Discard), +// make(map[string]uint), +// (map[string]uint)(nil), +// staticM0, +// m0, +// &m0, +// i0, +// &i0, +// &staticI0, +// &staticM0, +// []uint32{6, 7, 8}, +// "abc", +// Raw{}, +// RawExt{}, +// &Raw{}, +// &RawExt{}, +// unsafe.Pointer(&i0), +// } { +// i2 := rv2i(reflect.ValueOf(i)) +// eq := reflect.DeepEqual(i, i2) +// fmt.Printf(">>>> %v == %v? 
%v\n", i, i2, eq) +// } +// // os.Exit(0) +// } + +// func init() { +// staticRv2iTest() +// } + +// func rv2i(rv reflect.Value) interface{} { +// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() { +// return rv.Interface() +// } +// // var i interface{} +// // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// var ui unsafeIntf +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr)) +// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 { +// if urv.flag&unsafeRvFlagAddr != 0 { +// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()") +// return rv.Interface() +// } +// println("****** indirect type/kind") +// ui.word = urv.ptr +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// println("****** unsafe rv flag indir") +// ui.word = *(*unsafe.Pointer)(urv.ptr) +// } else { +// println("****** default: assign prt to word directly") +// ui.word = urv.ptr +// } +// // ui.word = urv.ptr +// ui.typ = urv.typ +// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word) +// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word)) +// return *(*interface{})(unsafe.Pointer(&ui)) +// // return i +// } diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/json.go new file mode 100644 index 00000000..a6af2c80 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/json.go @@ -0,0 +1,1172 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// By default, this json support uses base64 encoding for bytes, because you cannot +// store and read any arbitrary string in json (only unicode). +// However, the user can configre how to encode/decode bytes. +// +// This library specifically supports UTF-8 for encoding and decoding only. +// +// Note that the library will happily encode/decode things which are not valid +// json e.g. a map[int64]string. We do it for consistency. With valid json, +// we will encode and decode appropriately. +// Users can specify their map type if necessary to force it. +// +// Note: +// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. +// We implement it here. +// - Also, strconv.ParseXXX for floats and integers +// - only works on strings resulting in unnecessary allocation and []byte-string conversion. +// - it does a lot of redundant checks, because json numbers are simpler that what it supports. +// - We parse numbers (floats and integers) directly here. +// We only delegate parsing floats if it is a hairy float which could cause a loss of precision. +// In that case, we delegate to strconv.ParseFloat. +// +// Note: +// - encode does not beautify. There is no whitespace when encoding. +// - rpc calls which take single integer arguments or write single numeric arguments will need care. + +// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver +// MUST not call one-another. 
+ +import ( + "bytes" + "encoding/base64" + "reflect" + "strconv" + "unicode/utf16" + "unicode/utf8" +) + +//-------------------------------- + +var ( + // jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} + + // jsonFloat64Pow10 = [...]float64{ + // 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + // 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + // 1e20, 1e21, 1e22, + // } + + // jsonUint64Pow10 = [...]uint64{ + // 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + // 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + // } + + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces string + + jsonCharHtmlSafeSet bitset128 + jsonCharSafeSet bitset128 + jsonCharWhitespaceSet bitset256 + jsonNumSet bitset256 + // jsonIsFloatSet bitset256 + + jsonU4Set [256]byte +) + +const ( + // jsonUnreadAfterDecNum controls whether we unread after decoding a number. + // + // instead of unreading, just update d.tok (iff it's not a whitespace char) + // However, doing this means that we may HOLD onto some data which belongs to another stream. + // Thus, it is safest to unread the data when done. + // keep behind a constant flag for now. + jsonUnreadAfterDecNum = true + + // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: + // - If we see first character of null, false or true, + // do not validate subsequent characters. + // - e.g. if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. + jsonValidateSymbols = true + + jsonSpacesOrTabsLen = 128 + + jsonU4SetErrVal = 128 +) + +func init() { + var bs [jsonSpacesOrTabsLen]byte + for i := 0; i < jsonSpacesOrTabsLen; i++ { + bs[i] = ' ' + } + jsonSpaces = string(bs[:]) + + for i := 0; i < jsonSpacesOrTabsLen; i++ { + bs[i] = '\t' + } + jsonTabs = string(bs[:]) + + // populate the safe values as true: note: ASCII control characters are (0-31) + // jsonCharSafeSet: all true except (0-31) " \ + // jsonCharHtmlSafeSet: all true except (0-31) " \ < > & + var i byte + for i = 32; i < utf8.RuneSelf; i++ { + switch i { + case '"', '\\': + case '<', '>', '&': + jsonCharSafeSet.set(i) // = true + default: + jsonCharSafeSet.set(i) + jsonCharHtmlSafeSet.set(i) + } + } + for i = 0; i <= utf8.RuneSelf; i++ { + switch i { + case ' ', '\t', '\r', '\n': + jsonCharWhitespaceSet.set(i) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-': + jsonNumSet.set(i) + } + } + for j := range jsonU4Set { + switch i = byte(j); i { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + jsonU4Set[i] = i - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + jsonU4Set[i] = i - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + jsonU4Set[i] = i - 'A' + 10 + default: + jsonU4Set[i] = jsonU4SetErrVal + } + // switch i = byte(j); i { + // case 'e', 'E', '.': + // jsonIsFloatSet.set(i) + // } + } + // jsonU4Set[255] = jsonU4SetErrVal +} + +type jsonEncDriver struct { + e *Encoder + w encWriter + h *JsonHandle + b [64]byte // scratch + bs []byte // scratch + se setExtWrapper + ds string // indent string + dl uint16 // indent level + dt bool // indent using tabs + d bool // indent + c containerState + noBuiltInTypes +} + +// indent is done as below: +// - newline and indent are added before each mapKey or arrayElem +// - newline and indent are added before each ending, +// except there was no entry (so we can have {} or []) + +func (e *jsonEncDriver) 
WriteArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriver) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerArrayElem +} + +func (e *jsonEncDriver) WriteArrayEnd() { + if e.d { + e.dl-- + if e.c != containerArrayStart { + e.writeIndent() + } + } + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriver) WriteMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriver) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerMapKey +} + +func (e *jsonEncDriver) WriteMapElemValue() { + if e.d { + e.w.writen2(':', ' ') + } else { + e.w.writen1(':') + } + e.c = containerMapValue +} + +func (e *jsonEncDriver) WriteMapEnd() { + if e.d { + e.dl-- + if e.c != containerMapStart { + e.writeIndent() + } + } + e.w.writen1('}') + e.c = containerMapEnd +} + +// func (e *jsonEncDriver) sendContainerState(c containerState) { +// // determine whether to output separators +// switch c { +// case containerMapKey: +// if e.c != containerMapStart { +// e.w.writen1(',') +// } +// if e.d { +// e.writeIndent() +// } +// case containerMapValue: +// if e.d { +// e.w.writen2(':', ' ') +// } else { +// e.w.writen1(':') +// } +// case containerMapEnd: +// if e.d { +// e.dl-- +// if e.c != containerMapStart { +// e.writeIndent() +// } +// } +// e.w.writen1('}') +// case containerArrayElem: +// if e.c != containerArrayStart { +// e.w.writen1(',') +// } +// if e.d { +// e.writeIndent() +// } +// case containerArrayEnd: +// if e.d { +// e.dl-- +// if e.c != containerArrayStart { +// e.writeIndent() +// } +// } +// e.w.writen1(']') +// } +// e.c = c +// } + +func (e *jsonEncDriver) writeIndent() { + e.w.writen1('\n') + if x := len(e.ds) * int(e.dl); x <= jsonSpacesOrTabsLen { + if e.dt { + e.w.writestr(jsonTabs[:x]) + } else { + e.w.writestr(jsonSpaces[:x]) + } + } else { + for i := uint16(0); i < e.dl; i++ { + e.w.writestr(e.ds) + } + } +} + +func (e *jsonEncDriver) EncodeNil() { + e.w.writen4('n', 'u', 'l', 'l') // e.w.writeb(jsonLiterals[9:13]) // null +} + +func (e *jsonEncDriver) EncodeBool(b bool) { + if b { + e.w.writen4('t', 'r', 'u', 'e') // e.w.writeb(jsonLiterals[0:4]) // true + } else { + e.w.writen5('f', 'a', 'l', 's', 'e') // e.w.writeb(jsonLiterals[4:9]) // false + } +} + +func (e *jsonEncDriver) EncodeFloat32(f float32) { + e.encodeFloat(float64(f), 32) +} + +func (e *jsonEncDriver) EncodeFloat64(f float64) { + e.encodeFloat(f, 64) +} + +func (e *jsonEncDriver) encodeFloat(f float64, numbits int) { + x := strconv.AppendFloat(e.b[:0], f, 'G', -1, numbits) + // if bytes.IndexByte(x, 'E') == -1 && bytes.IndexByte(x, '.') == -1 { + if !jsonIsFloatBytesB2(x) { + x = append(x, '.', '0') + } + e.w.writeb(x) +} + +func (e *jsonEncDriver) EncodeInt(v int64) { + if x := e.h.IntegerAsString; x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) { + e.w.writen1('"') + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) + e.w.writen1('"') + return + } + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriver) EncodeUint(v uint64) { + if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 { + e.w.writen1('"') + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) + e.w.writen1('"') + return + } + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, 
en *Encoder) { + if v := ext.ConvertExt(rv); v == nil { + e.w.writen4('n', 'u', 'l', 'l') // e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + // only encodes re.Value (never re.Data) + if re.Value == nil { + e.w.writen4('n', 'u', 'l', 'l') // e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeSymbol(v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if c == c_RAW && e.se.i != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + if c == c_RAW { + slen := base64.StdEncoding.EncodedLen(len(v)) + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + base64.StdEncoding.Encode(e.bs, v) + e.w.writen1('"') + e.w.writeb(e.bs) + e.w.writen1('"') + } else { + e.quoteStr(stringView(v)) + } +} + +func (e *jsonEncDriver) EncodeAsis(v []byte) { + e.w.writeb(v) +} + +func (e *jsonEncDriver) quoteStr(s string) { + // adapted from std pkg encoding/json + const hex = "0123456789abcdef" + w := e.w + w.writen1('"') + var start int + for i, slen := 0, len(s); i < slen; { + // encode all bytes < 0x20 (except \r, \n). + // also encode < > & to prevent security holes when served to some browsers. + if b := s[i]; b < utf8.RuneSelf { + // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + if jsonCharHtmlSafeSet.isset(b) || (e.h.HTMLCharsAsIs && jsonCharSafeSet.isset(b)) { + i++ + continue + } + if start < i { + w.writestr(s[start:i]) + } + switch b { + case '\\', '"': + w.writen2('\\', b) + case '\n': + w.writen2('\\', 'n') + case '\r': + w.writen2('\\', 'r') + case '\b': + w.writen2('\\', 'b') + case '\f': + w.writen2('\\', 'f') + case '\t': + w.writen2('\\', 't') + default: + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. + if c == '\u2028' || c == '\u2029' { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\u202`) + w.writen1(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.writestr(s[start:]) + } + w.writen1('"') +} + +func (e *jsonEncDriver) atEndOfEncode() { + if e.h.TermWhitespace { + if e.d { + e.w.writen1('\n') + } else { + e.w.writen1(' ') + } + } +} + +type jsonDecDriver struct { + noBuiltInTypes + d *Decoder + h *JsonHandle + r decReader + + c containerState + // tok is used to store the token read right after skipWhiteSpace. + tok uint8 + + fnull bool // found null from appendStringAsBytes + + bstr [8]byte // scratch used for string \UXXX parsing + b [64]byte // scratch, used for parsing strings or numbers + b2 [64]byte // scratch, used only for decodeBytes (after base64) + bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. 
+ + se setExtWrapper + + // n jsonNum +} + +func jsonIsWS(b byte) bool { + // return b == ' ' || b == '\t' || b == '\r' || b == '\n' + return jsonCharWhitespaceSet.isset(b) +} + +func (d *jsonDecDriver) uncacheRead() { + if d.tok != 0 { + d.r.unreadn1() + d.tok = 0 + } +} + +func (d *jsonDecDriver) ReadMapStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != '{' { + d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok) + } + d.tok = 0 + d.c = containerMapStart + return -1 +} + +func (d *jsonDecDriver) ReadArrayStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != '[' { + d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok) + } + d.tok = 0 + d.c = containerArrayStart + return -1 +} + +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + return d.tok == '}' || d.tok == ']' +} + +func (d *jsonDecDriver) ReadArrayElem() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerArrayStart { + const xc uint8 = ',' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerArrayElem +} + +func (d *jsonDecDriver) ReadArrayEnd() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = ']' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayEnd +} + +func (d *jsonDecDriver) ReadMapElemKey() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerMapStart { + const xc uint8 = ',' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerMapKey +} + +func (d *jsonDecDriver) ReadMapElemValue() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = ':' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapValue +} + +func (d *jsonDecDriver) ReadMapEnd() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '}' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapEnd +} + +// func (d *jsonDecDriver) readContainerState(c containerState, xc uint8, check bool) { +// if d.tok == 0 { +// d.tok = d.r.skip(&jsonCharWhitespaceSet) +// } +// if check { +// if d.tok != xc { +// d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) +// } +// d.tok = 0 +// } +// d.c = c +// } + +// func (d *jsonDecDriver) sendContainerState(c containerState) { +// if d.tok == 0 { +// d.tok = d.r.skip(&jsonCharWhitespaceSet) +// } +// var xc uint8 // char expected +// switch c { +// case containerMapKey: +// if d.c != containerMapStart { +// xc = ',' +// } +// case containerMapValue: +// xc = ':' +// case containerMapEnd: +// xc = '}' +// case containerArrayElem: +// if d.c != containerArrayStart { +// xc = ',' +// } +// case containerArrayEnd: +// xc = ']' +// } +// if xc != 0 { +// if d.tok != xc { +// d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) +// } +// d.tok = 0 +// } +// d.c = c +// } + +// func (d *jsonDecDriver) readLiteralIdx(fromIdx, toIdx uint8) { +// bs := d.r.readx(int(toIdx - fromIdx)) +// d.tok = 0 +// if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) { +// d.d.errorf("json: expecting %s: got %s", 
jsonLiterals[fromIdx:toIdx], bs) +// return +// } +// } + +func (d *jsonDecDriver) readSymbol3(v1, v2, v3 uint8) { + b1, b2, b3 := d.r.readn3() + d.tok = 0 + if jsonValidateSymbols && (b1 != v1 || b2 != v2 || b3 != v3) { + d.d.errorf("json: expecting %c, %c, %c: got %c, %c, %c", b1, b2, b3, v1, v2, v3) + return + } +} + +func (d *jsonDecDriver) readSymbol4(v1, v2, v3, v4 uint8) { + b1, b2, b3, b4 := d.r.readn4() + d.tok = 0 + if jsonValidateSymbols && (b1 != v1 || b2 != v2 || b3 != v3 || b4 != v4) { + d.d.errorf("json: expecting %c, %c, %c, %c: got %c, %c, %c, %c", b1, b2, b3, b4, v1, v2, v3, v4) + return + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok == 'n' { + d.readSymbol3('u', 'l', 'l') // d.readLiteralIdx(10, 13) // ull + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok == 'f' { + d.readSymbol4('a', 'l', 's', 'e') // d.readLiteralIdx(5, 9) // alse + return false + } + if d.tok == 't' { + d.readSymbol3('r', 'u', 'e') // d.readLiteralIdx(1, 4) // rue + return true + } + d.d.errorf("json: decode bool: got first char %c", d.tok) + return false // "unreachable" +} + +func (d *jsonDecDriver) ContainerType() (vt valueType) { + // check container type by checking the first char + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if b := d.tok; b == '{' { + return valueTypeMap + } else if b == '[' { + return valueTypeArray + } else if b == 'n' { + return valueTypeNil + } else if b == '"' { + return valueTypeString + } + return valueTypeUnset + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // return false // "unreachable" +} + +func (d *jsonDecDriver) decNumBytes() (bs []byte) { + // stores num bytes in d.bs + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok == '"' { + bs = d.r.readUntil(d.b2[:0], '"') + bs = bs[:len(bs)-1] + } else { + d.r.unreadn1() + bs = d.r.readTo(d.bs[:0], &jsonNumSet) + } + d.tok = 0 + return bs +} + +func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) { + bs := d.decNumBytes() + u, err := strconv.ParseUint(stringView(bs), 10, int(bitsize)) + if err != nil { + d.d.errorf("json: decode uint from %s: %v", bs, err) + return + } + return +} + +func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) { + bs := d.decNumBytes() + i, err := strconv.ParseInt(stringView(bs), 10, int(bitsize)) + if err != nil { + d.d.errorf("json: decode int from %s: %v", bs, err) + return + } + return +} + +func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + bs := d.decNumBytes() + bitsize := 64 + if chkOverflow32 { + bitsize = 32 + } + f, err := strconv.ParseFloat(stringView(bs), bitsize) + if err != nil { + d.d.errorf("json: decode float from %s: %v", bs, err) + return + } + return +} + +func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if ext == nil { + re := rv.(*RawExt) + re.Tag = xtag + d.d.decode(&re.Value) + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + return +} + +func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if d.se.i != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } + d.appendStringAsBytes() + // base64 encodes []byte{} as "", and we encode nil []byte as null. 
+ // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}. + // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs. + // However, it sets a fnull field to true, so we can check if a null was found. + if len(d.bs) == 0 { + if d.fnull { + return nil + } + return []byte{} + } + bs0 := d.bs + slen := base64.StdEncoding.DecodedLen(len(bs0)) + if slen <= cap(bs) { + bsOut = bs[:slen] + } else if zerocopy && slen <= cap(d.b2) { + bsOut = d.b2[:slen] + } else { + bsOut = make([]byte, slen) + } + slen2, err := base64.StdEncoding.Decode(bsOut, bs0) + if err != nil { + d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err) + return nil + } + if slen != slen2 { + bsOut = bsOut[:slen2] + } + return +} + +const jsonAlwaysReturnInternString = false + +func (d *jsonDecDriver) DecodeString() (s string) { + d.appendStringAsBytes() + // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key + if jsonAlwaysReturnInternString || d.c == containerMapKey { + return d.d.string(d.bs) + } + return string(d.bs) +} + +func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { + d.appendStringAsBytes() + return d.bs +} + +func (d *jsonDecDriver) appendStringAsBytes() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + d.fnull = false + if d.tok != '"' { + // d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) + // handle non-string scalar: null, true, false or a number + switch d.tok { + case 'n': + d.readSymbol3('u', 'l', 'l') // d.readLiteralIdx(10, 13) // ull + d.bs = d.bs[:0] + d.fnull = true + case 'f': + d.readSymbol4('a', 'l', 's', 'e') // d.readLiteralIdx(5, 9) // alse + d.bs = d.bs[:5] + copy(d.bs, "false") + case 't': + d.readSymbol3('r', 'u', 'e') // d.readLiteralIdx(1, 4) // rue + d.bs = d.bs[:4] + copy(d.bs, "true") + default: + // try to parse a valid number + bs := d.decNumBytes() + d.bs = d.bs[:len(bs)] + copy(d.bs, bs) + } + return + } + + d.tok = 0 + r := d.r + var cs = r.readUntil(d.b2[:0], '"') + var cslen = len(cs) + var c uint8 + v := d.bs[:0] + // appending on each byte seen can be expensive, so we just + // keep track of where we last read a contiguous set of + // non-special bytes (using cursor variable), + // and when we see a special byte + // e.g. end-of-slice, " or \, + // we will append the full range into the v slice before proceeding + for i, cursor := 0, 0; ; { + if i == cslen { + v = append(v, cs[cursor:]...) + cs = r.readUntil(d.b2[:0], '"') + cslen = len(cs) + i, cursor = 0, 0 + } + c = cs[i] + if c == '"' { + v = append(v, cs[cursor:i]...) + break + } + if c != '\\' { + i++ + continue + } + v = append(v, cs[cursor:i]...) + i++ + c = cs[i] + switch c { + case '"', '\\', '/', '\'': + v = append(v, c) + case 'b': + v = append(v, '\b') + case 'f': + v = append(v, '\f') + case 'n': + v = append(v, '\n') + case 'r': + v = append(v, '\r') + case 't': + v = append(v, '\t') + case 'u': + var r rune + var rr uint32 + c = cs[i+4] // may help reduce bounds-checking + for j := 1; j < 5; j++ { + c = jsonU4Set[cs[i+j]] + if c == jsonU4SetErrVal { + d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, c) + } + rr = rr*16 + uint32(c) + } + r = rune(rr) + i += 4 + if utf16.IsSurrogate(r) { + if !(cs[i+2] == 'u' && cs[i+1] == '\\') { + d.d.errorf(`json: unquoteStr: invalid unicode sequence.
Expecting \u`) + return + } + i += 2 + c = cs[i+4] // may help reduce bounds-checking + var rr1 uint32 + for j := 1; j < 5; j++ { + c = jsonU4Set[cs[i+j]] + if c == jsonU4SetErrVal { + d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, c) + } + rr1 = rr1*16 + uint32(c) + } + r = utf16.DecodeRune(r, rune(rr1)) + i += 4 + } + w2 := utf8.EncodeRune(d.bstr[:], r) + v = append(v, d.bstr[:w2]...) + default: + d.d.errorf("json: unsupported escaped value: %c", c) + } + i++ + cursor = i + } + d.bs = v +} + +// func (d *jsonDecDriver) jsonU4Arr(bs [4]byte) (r rune) { +// // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64) +// var u uint32 +// for _, v := range bs { +// if '0' <= v && v <= '9' { +// v = v - '0' +// } else if 'a' <= v && v <= 'f' { +// v = v - 'a' + 10 +// } else if 'A' <= v && v <= 'f' { +// v = v - 'A' + 10 +// } else { +// // d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v) +// return utf8.RuneError +// } +// u = u*16 + uint32(v) +// } +// return rune(u) +// } + +func (d *jsonDecDriver) DecodeNaked() { + z := d.d.n + // var decodeFurther bool + + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + switch d.tok { + case 'n': + d.readSymbol3('u', 'l', 'l') // d.readLiteralIdx(10, 13) // ull + z.v = valueTypeNil + case 'f': + d.readSymbol4('a', 'l', 's', 'e') // d.readLiteralIdx(5, 9) // alse + z.v = valueTypeBool + z.b = false + case 't': + d.readSymbol3('r', 'u', 'e') // d.readLiteralIdx(1, 4) // rue + z.v = valueTypeBool + z.b = true + case '{': + z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart + case '[': + z.v = valueTypeArray // don't consume. kInterfaceNaked will call ReadArrayStart + case '"': + z.v = valueTypeString + z.s = d.DecodeString() + default: // number + bs := d.decNumBytes() + var err error + if len(bs) == 0 { + d.d.errorf("json: decode number from empty string") + return + } else if d.h.PreferFloat || jsonIsFloatBytesB3(bs) { // bytes.IndexByte(bs, '.') != -1 ||... + // } else if d.h.PreferFloat || bytes.ContainsAny(bs, ".eE") { + z.v = valueTypeFloat + z.f, err = strconv.ParseFloat(stringView(bs), 64) + } else if d.h.SignedInteger || bs[0] == '-' { + z.v = valueTypeInt + z.i, err = strconv.ParseInt(stringView(bs), 10, 64) + } else { + z.v = valueTypeUint + z.u, err = strconv.ParseUint(stringView(bs), 10, 64) + } + if err != nil { + if z.v == valueTypeInt || z.v == valueTypeUint { + if v, ok := err.(*strconv.NumError); ok && (v.Err == strconv.ErrRange || v.Err == strconv.ErrSyntax) { + z.v = valueTypeFloat + z.f, err = strconv.ParseFloat(stringView(bs), 64) + } + } + if err != nil { + d.d.errorf("json: decode number from %s: %v", bs, err) + return + } + } + } + // if decodeFurther { + // d.s.sc.retryRead() + // } + return +} + +//---------------------- + +// JsonHandle is a handle for JSON encoding format. +// +// Json is comprehensively supported: +// - decodes numbers into interface{} as int, uint or float64 +// - configurable way to encode/decode []byte . +// by default, encodes and decodes []byte using base64 Std Encoding +// - UTF-8 support for encoding and decoding +// +// It has better performance than the json library in the standard library, +// by leveraging the performance improvements of the codec library and +// minimizing allocations. +// +// In addition, it doesn't read more bytes than necessary during a decode, which allows +// reading multiple values from a stream containing json and non-json content. 
+// For example, a user can read a json value, then a cbor value, then a msgpack value, +// all from the same stream in sequence. +type JsonHandle struct { + textEncodingType + BasicHandle + + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. + RawBytesExt InterfaceExt + + // Indent indicates how a value is encoded. + // - If positive, indent by that number of spaces. + // - If negative, indent by that number of tabs. + Indent int8 + + // IntegerAsString controls how integers (signed and unsigned) are encoded. + // + // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. + // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. + // This can be mitigated by configuring how to encode integers. + // + // IntegerAsString interpretes the following values: + // - if 'L', then encode integers > 2^53 as a json string. + // - if 'A', then encode all integers as a json string + // containing the exact integer representation as a decimal. + // - else encode all integers as a json number (default) + IntegerAsString uint8 + + // HTMLCharsAsIs controls how to encode some special characters to html: < > & + // + // By default, we encode them as \uXXX + // to prevent security holes when served from some browsers. + HTMLCharsAsIs bool + + // PreferFloat says that we will default to decoding a number as a float. + // If not set, we will examine the characters of the number and decode as an + // integer type if it doesn't have any of the characters [.eE]. + PreferFloat bool + + // TermWhitespace says that we add a whitespace character + // at the end of an encoding. + // + // The whitespace is important, especially if using numbers in a context + // where multiple items are written to a stream. + TermWhitespace bool +} + +func (h *JsonHandle) hasElemSeparators() bool { return true } + +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + +func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { + hd := jsonEncDriver{e: e, h: h} + hd.bs = hd.b[:0] + + hd.reset() + + return &hd +} + +func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { + // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} + hd := jsonDecDriver{d: d, h: h} + hd.bs = hd.b[:0] + hd.reset() + return &hd +} + +func (e *jsonEncDriver) reset() { + e.w = e.e.w + e.se.i = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } + e.d, e.dt, e.dl, e.ds = false, false, 0, "" + e.c = 0 + if e.h.Indent > 0 { + e.d = true + e.ds = jsonSpaces[:e.h.Indent] + } else if e.h.Indent < 0 { + e.d = true + e.dt = true + e.ds = jsonTabs[:-(e.h.Indent)] + } +} + +func (d *jsonDecDriver) reset() { + d.r = d.d.r + d.se.i = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + // d.n.reset() +} + +// func jsonIsFloatBytes(bs []byte) bool { +// for _, v := range bs { +// // if v == '.' 
|| v == 'e' || v == 'E' { +// if jsonIsFloatSet.isset(v) { +// return true +// } +// } +// return false +// } + +func jsonIsFloatBytesB2(bs []byte) bool { + return bytes.IndexByte(bs, '.') != -1 || + bytes.IndexByte(bs, 'E') != -1 +} + +func jsonIsFloatBytesB3(bs []byte) bool { + return bytes.IndexByte(bs, '.') != -1 || + bytes.IndexByte(bs, 'E') != -1 || + bytes.IndexByte(bs, 'e') != -1 +} + +// var jsonEncodeTerminate = []byte{' '} + +// func (h *JsonHandle) rpcEncodeTerminate() []byte { +// return jsonEncodeTerminate +// } + +var _ decDriver = (*jsonDecDriver)(nil) +var _ encDriver = (*jsonEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/msgpack.go new file mode 100644 index 00000000..fcc3177a --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/msgpack.go @@ -0,0 +1,899 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. + +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed + +*/ +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" + "reflect" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. 
+type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + w encWriter + h *MsgpackHandle + x [8]byte +} + +func (e *msgpackEncDriver) EncodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) EncodeInt(i int64) { + if i >= 0 { + e.EncodeUint(uint64(i)) + } else if i >= -32 { + e.w.writen1(byte(i)) + } else if i >= math.MinInt8 { + e.w.writen2(mpInt8, byte(i)) + } else if i >= math.MinInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i >= math.MinInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + e.w.writen1(byte(i)) + } else if i <= math.MaxUint8 { + e.w.writen2(mpUint8, byte(i)) + } else if i <= math.MaxUint16 { + e.w.writen1(mpUint16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxUint32 { + e.w.writen1(mpUint32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpUint64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) EncodeFloat32(f float32) { + e.w.writen1(mpFloat) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(v) + if bs == nil { + e.EncodeNil() + return + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeStringBytes(c_RAW, bs) + } +} + +func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + if l == 1 { + e.w.writen2(mpFixExt1, xtag) + } else if l == 2 { + e.w.writen2(mpFixExt2, xtag) + } else if l == 4 { + e.w.writen2(mpFixExt4, xtag) + } else if l == 8 { + e.w.writen2(mpFixExt8, xtag) + } else if l == 16 { + e.w.writen2(mpFixExt16, xtag) + } else if l < 256 { + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + } else if l < 65536 { + e.w.writen1(mpExt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + e.w.writen1(xtag) + } else { + e.w.writen1(mpExt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) WriteArrayStart(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) WriteMapStart(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) 
EncodeString(c charEncoding, s string) { + slen := len(s) + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerStr, slen) + } + if slen > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + slen := len(bs) + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerStr, slen) + } + if slen > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + if ct.hasFixMin && l < ct.fixCutoff { + e.w.writen1(ct.bFixMin | byte(l)) + } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) { + e.w.writen2(ct.b8, uint8(l)) + } else if l < 65536 { + e.w.writen1(ct.b16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + } else { + e.w.writen1(ct.b32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + d *Decoder + r decReader // *Decoder decReader decReaderT + h *MsgpackHandle + b [scratchByteArrayLen]byte + bd byte + bdRead bool + br bool // bytes reader + noBuiltInTypes + // noStreamingCodec + // decNoSeparator + decDriverNoopContainerReader +} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. +func (d *msgpackDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + n := d.d.n + var decodeFurther bool + + switch bd { + case mpNil: + n.v = valueTypeNil + d.bdRead = false + case mpFalse: + n.v = valueTypeBool + n.b = false + case mpTrue: + n.v = valueTypeBool + n.b = true + + case mpFloat: + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + case mpDouble: + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + + case mpUint8: + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) + case mpUint16: + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readx(8))) + + case mpInt8: + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) + case mpInt16: + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + n.v = valueTypeString + n.s = d.DecodeString() + } else { + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case bd == mpArray16, bd == 
mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + n.v = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + n.v = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt + clen := d.readExtLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(clen) + default: + d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(bigen.Uint16(d.r.readx(2)))) + case mpUint32: + i = int64(uint64(bigen.Uint32(d.r.readx(4)))) + case mpUint64: + i = int64(bigen.Uint64(d.r.readx(8))) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + i = int64(bigen.Uint64(d.r.readx(8))) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + return + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + d.d.errorf("Overflow int value: %v", i) + return + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + ui = bigen.Uint64(d.r.readx(8)) + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt16: + if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt32: + if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt64: + if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + return + default: + d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + return + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + d.d.errorf("Overflow uint 
value: %v", ui) + return + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + f = float64(d.DecodeInt(0)) + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("msgpack: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. +func (d *msgpackDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFalse || d.bd == 0 { + // b = false + } else if d.bd == mpTrue || d.bd == 1 { + b = true + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + + // DecodeBytes could be from: bin str fixstr fixarray array ... + var clen int + vt := d.ContainerType() + switch vt { + case valueTypeBytes: + // valueTypeBytes may be a mpBin or an mpStr container + if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else { + clen = d.readContainerLen(msgpackContainerStr) + } + case valueTypeString: + clen = d.readContainerLen(msgpackContainerStr) + case valueTypeArray: + clen = d.readContainerLen(msgpackContainerList) + // ensure everything after is one byte each + for i := 0; i < clen; i++ { + d.readNextBd() + if d.bd == mpNil { + bs = append(bs, 0) + } else if d.bd == mpUint8 { + bs = append(bs, d.r.readn1()) + } else { + d.d.errorf("cannot read non-byte into a byte array") + return + } + } + d.bdRead = false + return bs + default: + d.d.errorf("invalid container type: expecting bin|str|array") + return + } + + // these are (bin|str)(8|16|32) + // println("DecodeBytes: clen: ", clen) + d.bdRead = false + // bytes may be nil, so handle it. if nil, clen=-1. 
+ if clen < 0 { + return nil + } + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *msgpackDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true)) +} + +func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.b[:], true) +} + +func (d *msgpackDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *msgpackDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + if bd == mpNil { + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 || + (!d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) { + return valueTypeBytes + } else if d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) { + return valueTypeString + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + v = true + } + return +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + if bd == mpNil { + clen = -1 // to represent nil + } else if bd == ct.b8 { + clen = int(d.r.readn1()) + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readx(2))) + } else if bd == ct.b32 { + clen = int(bigen.Uint32(d.r.readx(4))) + } else if (ct.bFixMin & bd) == ct.bFixMin { + clen = int(ct.bFixMin ^ bd) + } else { + d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) ReadMapStart() int { + if !d.bdRead { + d.readNextBd() + } + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) ReadArrayStart() int { + if !d.bdRead { + d.readNextBd() + } + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(bigen.Uint16(d.r.readx(2))) + case mpExt32: + clen = int(bigen.Uint32(d.r.readx(4))) + default: + d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + return + } + return +} + +func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + xbd := 
d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs = d.DecodeBytes(nil, true) + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs = d.DecodeStringAsBytes() + } else { + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. + // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool + binaryEncodingType + noElemSeparators +} + +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { + return &msgpackEncDriver{e: e, w: e.w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { + return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *msgpackEncDriver) reset() { + e.w = e.e.w +} + +func (d *msgpackDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. 
+ // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + + if c.isClosed() { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) + // return + // } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return + } + if b != fia { + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/noop.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/noop.go new file mode 100644 index 00000000..015af581 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/noop.go @@ -0,0 +1,214 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
+// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build ignore + +package codec + +import ( + "math/rand" + "time" +) + +// NoopHandle returns a no-op handle. It basically does nothing. +// It is only useful for benchmarking, as it gives an idea of the +// overhead from the codec framework. +// +// LIBRARY USERS: *** DO NOT USE *** +func NoopHandle(slen int) *noopHandle { + h := noopHandle{} + h.rand = rand.New(rand.NewSource(time.Now().UnixNano())) + h.B = make([][]byte, slen) + h.S = make([]string, slen) + for i := 0; i < len(h.S); i++ { + b := make([]byte, i+1) + for j := 0; j < len(b); j++ { + b[j] = 'a' + byte(i) + } + h.B[i] = b + h.S[i] = string(b) + } + return &h +} + +// noopHandle does nothing. +// It is used to simulate the overhead of the codec framework. +type noopHandle struct { + BasicHandle + binaryEncodingType + noopDrv // noopDrv is unexported here, so we can get a copy of it when needed. +} + +type noopDrv struct { + d *Decoder + e *Encoder + i int + S []string + B [][]byte + mks []bool // stack. if map (true), else if array (false) + mk bool // top of stack. what container are we on? map or array? + ct valueType // last response for IsContainerType. + cb int // counter for ContainerType + rand *rand.Rand +} + +func (h *noopDrv) r(v int) int { return h.rand.Intn(v) } +func (h *noopDrv) m(v int) int { h.i++; return h.i % v } + +func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h } +func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h } + +func (h *noopDrv) reset() {} +func (h *noopDrv) uncacheRead() {} + +// --- encDriver + +// stack functions (for map and array) +func (h *noopDrv) start(b bool) { + // println("start", len(h.mks)+1) + h.mks = append(h.mks, b) + h.mk = b +} +func (h *noopDrv) end() { + // println("end: ", len(h.mks)-1) + h.mks = h.mks[:len(h.mks)-1] + if len(h.mks) > 0 { + h.mk = h.mks[len(h.mks)-1] + } else { + h.mk = false + } +} + +func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {} +func (h *noopDrv) EncodeNil() {} +func (h *noopDrv) EncodeInt(i int64) {} +func (h *noopDrv) EncodeUint(i uint64) {} +func (h *noopDrv) EncodeBool(b bool) {} +func (h *noopDrv) EncodeFloat32(f float32) {} +func (h *noopDrv) EncodeFloat64(f float64) {} +func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {} +func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) } +func (h *noopDrv) EncodeMapStart(length int) { h.start(false) } +func (h *noopDrv) EncodeEnd() { h.end() } + +func (h *noopDrv) EncodeString(c charEncoding, v string) {} +func (h *noopDrv) EncodeSymbol(v string) {} +func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {} + +func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {} + +// ---- decDriver +func (h *noopDrv) initReadNext() {} +func (h *noopDrv) CheckBreak() bool { return false } +func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false } +func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {} +func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) } +func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) } +func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) } +func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 } +func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] } +func (h *noopDrv) DecodeStringAsBytes() []byte { return h.DecodeBytes(nil, true) } + +func (h *noopDrv) DecodeBytes(bs []byte, zerocopy 
bool) []byte { return h.B[h.m(len(h.B))] } + +func (h *noopDrv) ReadEnd() { h.end() } + +// toggle map/slice +func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) } +func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) } + +func (h *noopDrv) ContainerType() (vt valueType) { + // return h.m(2) == 0 + // handle kStruct, which will bomb is it calls this and doesn't get back a map or array. + // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2 + // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs) + // however, every 10th time it is called, we just return something else. + var vals = [...]valueType{valueTypeArray, valueTypeMap} + // ------------ TAKE ------------ + // if h.cb%2 == 0 { + // if h.ct == valueTypeMap || h.ct == valueTypeArray { + // } else { + // h.ct = vals[h.m(2)] + // } + // } else if h.cb%5 == 0 { + // h.ct = valueType(h.m(8)) + // } else { + // h.ct = vals[h.m(2)] + // } + // ------------ TAKE ------------ + // if h.cb%16 == 0 { + // h.ct = valueType(h.cb % 8) + // } else { + // h.ct = vals[h.cb%2] + // } + h.ct = vals[h.cb%2] + h.cb++ + return h.ct + + // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes { + // return h.ct + // } + // return valueTypeUnset + // TODO: may need to tweak this so it works. + // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap { + // h.cb = !h.cb + // h.ct = vt + // return h.cb + // } + // // go in a loop and check it. + // h.ct = vt + // h.cb = h.m(7) == 0 + // return h.cb +} +func (h *noopDrv) TryDecodeAsNil() bool { + if h.mk { + return false + } else { + return h.m(8) == 0 + } +} +func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 { + return 0 +} + +func (h *noopDrv) DecodeNaked() { + // use h.r (random) not h.m() because h.m() could cause the same value to be given. + var sk int + if h.mk { + // if mapkey, do not support values of nil OR bytes, array, map or rawext + sk = h.r(7) + 1 + } else { + sk = h.r(12) + } + n := &h.d.n + switch sk { + case 0: + n.v = valueTypeNil + case 1: + n.v, n.b = valueTypeBool, false + case 2: + n.v, n.b = valueTypeBool, true + case 3: + n.v, n.i = valueTypeInt, h.DecodeInt(64) + case 4: + n.v, n.u = valueTypeUint, h.DecodeUint(64) + case 5: + n.v, n.f = valueTypeFloat, h.DecodeFloat(true) + case 6: + n.v, n.f = valueTypeFloat, h.DecodeFloat(false) + case 7: + n.v, n.s = valueTypeString, h.DecodeString() + case 8: + n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))] + case 9: + n.v = valueTypeArray + case 10: + n.v = valueTypeMap + default: + n.v = valueTypeExt + n.u = h.DecodeUint(64) + n.l = h.B[h.m(len(h.B))] + } + h.ct = n.v + return +} diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/rpc.go new file mode 100644 index 00000000..4c307804 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/rpc.go @@ -0,0 +1,187 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bufio" + "errors" + "io" + "net/rpc" + "sync" +) + +// // rpcEncodeTerminator allows a handler specify a []byte terminator to send after each Encode. 
+// // +// // Some codecs like json need to put a space after each encoded value, to serve as a +// // delimiter for things like numbers (else json codec will continue reading till EOF). +// type rpcEncodeTerminator interface { +// rpcEncodeTerminate() []byte +// } + +// Rpc provides a rpc Server or Client Codec for rpc communication. +type Rpc interface { + ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec + ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec +} + +// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer +// used by the rpc connection. It accommodates use-cases where the connection +// should be used by rpc and non-rpc functions, e.g. streaming a file after +// sending an rpc response. +type RpcCodecBuffered interface { + BufferedReader() *bufio.Reader + BufferedWriter() *bufio.Writer +} + +// ------------------------------------- + +// rpcCodec defines the struct members and common methods. +type rpcCodec struct { + rwc io.ReadWriteCloser + dec *Decoder + enc *Encoder + bw *bufio.Writer + br *bufio.Reader + mu sync.Mutex + h Handle + + cls bool + clsmu sync.RWMutex +} + +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + bw := bufio.NewWriter(conn) + br := bufio.NewReader(conn) + + // defensive: ensure that jsonH has TermWhitespace turned on. + if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace { + panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true")) + } + + return rpcCodec{ + rwc: conn, + bw: bw, + br: br, + enc: NewEncoder(bw, h), + dec: NewDecoder(br, h), + h: h, + } +} + +func (c *rpcCodec) BufferedReader() *bufio.Reader { + return c.br +} + +func (c *rpcCodec) BufferedWriter() *bufio.Writer { + return c.bw +} + +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { + if c.isClosed() { + return io.EOF + } + if err = c.enc.Encode(obj1); err != nil { + return + } + // t, tOk := c.h.(rpcEncodeTerminator) + // if tOk { + // c.bw.Write(t.rpcEncodeTerminate()) + // } + if writeObj2 { + if err = c.enc.Encode(obj2); err != nil { + return + } + // if tOk { + // c.bw.Write(t.rpcEncodeTerminate()) + // } + } + if doFlush { + return c.bw.Flush() + } + return +} + +func (c *rpcCodec) read(obj interface{}) (err error) { + if c.isClosed() { + return io.EOF + } + //If nil is passed in, we should still attempt to read content to nowhere. 
+ if obj == nil { + var obj2 interface{} + return c.dec.Decode(&obj2) + } + return c.dec.Decode(obj) +} + +func (c *rpcCodec) isClosed() bool { + c.clsmu.RLock() + x := c.cls + c.clsmu.RUnlock() + return x +} + +func (c *rpcCodec) Close() error { + if c.isClosed() { + return io.EOF + } + c.clsmu.Lock() + c.cls = true + c.clsmu.Unlock() + return c.rwc.Close() +} + +func (c *rpcCodec) ReadResponseBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +type goRpcCodec struct { + rpcCodec +} + +func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // Must protect for concurrent access as per API + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +// goRpc is the implementation of Rpc that uses the communication protocol +// as defined in net/rpc package. +type goRpc struct{} + +// GoRpc implements Rpc using the communication protocol defined in net/rpc package. +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/simple.go new file mode 100644 index 00000000..b69a15e7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/simple.go @@ -0,0 +1,541 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" +) + +const ( + _ uint8 = iota + simpleVdNil = 1 + simpleVdFalse = 2 + simpleVdTrue = 3 + simpleVdFloat32 = 4 + simpleVdFloat64 = 5 + + // each lasts for 4 (ie n, n+1, n+2, n+3) + simpleVdPosInt = 8 + simpleVdNegInt = 12 + + // containers: each lasts for 4 (ie n, n+1, n+2, ... 
n+7) + simpleVdString = 216 + simpleVdByteArray = 224 + simpleVdArray = 232 + simpleVdMap = 240 + simpleVdExt = 248 +) + +type simpleEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + h *SimpleHandle + w encWriter + b [8]byte +} + +func (e *simpleEncDriver) EncodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriver) EncodeFloat32(f float32) { + e.w.writen1(simpleVdFloat32) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *simpleEncDriver) EncodeFloat64(f float64) { + e.w.writen1(simpleVdFloat64) + bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *simpleEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriver) EncodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriver) encUint(v uint64, bd uint8) { + if v <= math.MaxUint8 { + e.w.writen2(bd, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 1) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 2) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 3) + bigenHelper{e.b[:8], e.w}.writeUint64(v) + } +} + +func (e *simpleEncDriver) encLen(bd byte, length int) { + if length == 0 { + e.w.writen1(bd) + } else if length <= math.MaxUint8 { + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) + } else if length <= math.MaxUint16 { + e.w.writen1(bd + 2) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length)) + } else if int64(length) <= math.MaxUint32 { + e.w.writen1(bd + 3) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length)) + } else { + e.w.writen1(bd + 4) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length)) + } +} + +func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + e.w.writen1(xtag) +} + +func (e *simpleEncDriver) WriteArrayStart(length int) { + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriver) WriteMapStart(length int) { + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriver) EncodeString(c charEncoding, v string) { + e.encLen(simpleVdString, len(v)) + e.w.writestr(v) +} + +func (e *simpleEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +//------------------------------------ + +type simpleDecDriver struct { + d *Decoder + h *SimpleHandle + r decReader + bdRead bool + bd byte + br bool // bytes reader + b [scratchByteArrayLen]byte + noBuiltInTypes + // noStreamingCodec + decDriverNoopContainerReader +} + +func (d *simpleDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *simpleDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *simpleDecDriver) ContainerType() (vt valueType) { + if 
!d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + return valueTypeNil + } else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 || + d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 { + return valueTypeBytes + } else if d.bd == simpleVdString || d.bd == simpleVdString+1 || + d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 { + return valueTypeString + } else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 || + d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 { + return valueTypeArray + } else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 || + d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *simpleDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false +} + +func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + case simpleVdPosInt + 1: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case simpleVdPosInt + 2: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case simpleVdPosInt + 3: + ui = uint64(bigen.Uint64(d.r.readx(8))) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + neg = true + case simpleVdNegInt + 1: + ui = uint64(bigen.Uint16(d.r.readx(2))) + neg = true + case simpleVdNegInt + 2: + ui = uint64(bigen.Uint32(d.r.readx(4))) + neg = true + case simpleVdNegInt + 3: + ui = uint64(bigen.Uint64(d.r.readx(8))) + neg = true + default: + d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + return + } + // don't do this check, because callers may only want the unsigned value. + // if ui > math.MaxInt64 { + // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) + // return + // } + return +} + +func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) { + ui, neg := d.decCheckInteger() + i, overflow := chkOvf.SignedInt(ui) + if overflow { + d.d.errorf("simple: overflow converting %v to signed integer", ui) + return + } + if neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("simple: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == simpleVdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { + f = float64(d.DecodeInt(64)) + } else { + d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + return + } + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("msgpack: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). 
+func (d *simpleDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdTrue { + b = true + } else if d.bd == simpleVdFalse { + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) decLen() int { + switch d.bd % 8 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(bigen.Uint16(d.r.readx(2))) + case 3: + ui := uint64(bigen.Uint32(d.r.readx(4))) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return 0 + } + return int(ui) + case 4: + ui := bigen.Uint64(d.r.readx(8)) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return 0 + } + return int(ui) + } + d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true)) +} + +func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.b[:], true) +} + +func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs = d.DecodeBytes(nil, true) + default: + d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.n + var decodeFurther bool + + switch d.bd { + case simpleVdNil: + n.v = valueTypeNil + case simpleVdFalse: + n.v = valueTypeBool + n.b = false + case simpleVdTrue: + n.v = valueTypeBool + n.b = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt(64) + } else { + n.v = valueTypeUint + n.u = d.DecodeUint(64) + } + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + n.v = valueTypeInt + n.i = d.DecodeInt(64) + case simpleVdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat(true) + case simpleVdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat(false) + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + n.v = valueTypeString + n.s = d.DecodeString() + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + n.v = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle for a very simple encoding format. +// +// simple is a simplistic codec similar to binc, but not as compact. +// - Encoding of a value is always preceded by the descriptor byte (bd) +// - True, false, nil are encoded fully in 1 byte (the descriptor) +// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). +// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. +// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) +// - Lenght of containers (strings, bytes, array, map, extensions) +// are encoded in 0, 1, 2, 4 or 8 bytes. +// Zero-length containers have no length encoded. +// For others, the number of bytes is given by pow(2, bd%3) +// - maps are encoded as [bd] [length] [[key][value]]... +// - arrays are encoded as [bd] [length] [value]... +// - extensions are encoded as [bd] [length] [tag] [byte]... +// - strings/bytearrays are encoded as [bd] [length] [byte]... +// +// The full spec will be published soon. 
+type SimpleHandle struct { + BasicHandle + binaryEncodingType + noElemSeparators +} + +func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { + return &simpleEncDriver{e: e, w: e.w, h: h} +} + +func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { + return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *simpleEncDriver) reset() { + e.w = e.e.w +} + +func (d *simpleDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +var _ decDriver = (*simpleDecDriver)(nil) +var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/time.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/time.go new file mode 100644 index 00000000..55841d4c --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/time.go @@ -0,0 +1,220 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "fmt" + "time" +) + +var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} + +type timeExt struct{} + +func (x timeExt) WriteExt(v interface{}) (bs []byte) { + switch v2 := v.(type) { + case time.Time: + bs = encodeTime(v2) + case *time.Time: + bs = encodeTime(*v2) + default: + panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2)) + } + return +} +func (x timeExt) ReadExt(v interface{}, bs []byte) { + tt, err := decodeTime(bs) + if err != nil { + panic(err) + } + *(v.(*time.Time)) = tt +} + +func (x timeExt) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) +} +func (x timeExt) UpdateExt(v interface{}, src interface{}) { + x.ReadExt(v, src.([]byte)) +} + +// EncodeTime encodes a time.Time as a []byte, including +// information on the instant in time and UTC offset. +// +// Format Description +// +// A timestamp is composed of 3 components: +// +// - secs: signed integer representing seconds since unix epoch +// - nsces: unsigned integer representing fractional seconds as a +// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 +// - tz: signed integer representing timezone offset in minutes east of UTC, +// and a dst (daylight savings time) flag +// +// When encoding a timestamp, the first byte is the descriptor, which +// defines which components are encoded and how many bytes are used to +// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it +// is not encoded in the byte array explicitly*. +// +// Descriptor 8 bits are of the form `A B C DDD EE`: +// A: Is secs component encoded? 1 = true +// B: Is nsecs component encoded? 1 = true +// C: Is tz component encoded? 1 = true +// DDD: Number of extra bytes for secs (range 0-7). +// If A = 1, secs encoded in DDD+1 bytes. +// If A = 0, secs is not encoded, and is assumed to be 0. +// If A = 1, then we need at least 1 byte to encode secs. +// DDD says the number of extra bytes beyond that 1. +// E.g. if DDD=0, then secs is represented in 1 byte. +// if DDD=2, then secs is represented in 3 bytes. +// EE: Number of extra bytes for nsecs (range 0-3). 
+// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) +// +// Following the descriptor bytes, subsequent bytes are: +// +// secs component encoded in `DDD + 1` bytes (if A == 1) +// nsecs component encoded in `EE + 1` bytes (if B == 1) +// tz component encoded in 2 bytes (if C == 1) +// +// secs and nsecs components are integers encoded in a BigEndian +// 2-complement encoding format. +// +// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to +// Least significant bit 0 are described below: +// +// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). +// Bit 15 = have\_dst: set to 1 if we set the dst flag. +// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. +// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. +// +func encodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// DecodeTime decodes a []byte into a time.Time. +func decodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printed. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + //tzname[3] = '-' (TODO: verify. this works here) + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. 
+ // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +// func timeLocUTCName(tzint int16) string { +// if tzint == 0 { +// return "UTC" +// } +// var tzname = []byte("UTC+00:00") +// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. +// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first +// var tzhr, tzmin int16 +// if tzint < 0 { +// tzname[3] = '-' // (TODO: verify. this works here) +// tzhr, tzmin = -tzint/60, (-tzint)%60 +// } else { +// tzhr, tzmin = tzint/60, tzint%60 +// } +// tzname[4] = timeDigits[tzhr/10] +// tzname[5] = timeDigits[tzhr%10] +// tzname[7] = timeDigits[tzmin/10] +// tzname[8] = timeDigits[tzmin%10] +// return string(tzname) +// //return time.FixedZone(string(tzname), int(tzint)*60) +// } diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/xml.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/xml.go new file mode 100644 index 00000000..9386775d --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/xml.go @@ -0,0 +1,426 @@ +// +build ignore + +package codec + +import "reflect" + +/* + +A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder. + +We are attempting this due to perceived issues with encoding/xml: + - Complicated. It tried to do too much, and is not as simple to use as json. + - Due to over-engineering, reflection is over-used AND performance suffers: + java is 6X faster:http://fabsk.eu/blog/category/informatique/dev/golang/ + even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html + +codec framework will offer the following benefits + - VASTLY improved performance (when using reflection-mode or codecgen) + - simplicity and consistency: with the rest of the supported formats + - all other benefits of codec framework (streaming, codegeneration, etc) + +codec is not a drop-in replacement for encoding/xml. +It is a replacement, based on the simplicity and performance of codec. +Look at it like JAXB for Go. + +Challenges: + + - Need to output XML preamble, with all namespaces at the right location in the output. + - Each "end" block is dynamic, so we need to maintain a context-aware stack + - How to decide when to use an attribute VS an element + - How to handle chardata, attr, comment EXPLICITLY. + - Should it output fragments? + e.g. encoding a bool should just output true OR false, which is not well-formed XML. + +Extend the struct tag. See representative example: + type X struct { + ID uint8 codec:"xid|http://ugorji.net/x-namespace id,omitempty,toarray,attr,cdata" + } + +Based on this, we encode + - fields as elements, BUT encode as attributes if struct tag contains ",attr". + - text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata". + +In this mode, we only encode as attribute if ",attr" is found, and only encode as CDATA +if ",cdata" is found in the struct tag. + +To handle namespaces: + - XMLHandle is denoted as being namespace-aware. + Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name. + - *Encoder and *Decoder know whether the Handle "prefers" namespaces. + - add *Encoder.getEncName(*structFieldInfo). 
+ No one calls *structFieldInfo.indexForEncName directly anymore + - add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc + No one accesses .encName anymore except in + - let encode.go and decode.go use these (for consistency) + - only problem exists for gen.go, where we create a big switch on encName. + Now, we also have to add a switch on strings.endsWith(kName, encNsName) + - gen.go will need to have many more methods, and then double-on the 2 switch loops like: + switch k { + case "abc" : x.abc() + case "def" : x.def() + default { + switch { + case !nsAware: panic(...) + case strings.endsWith("nsabc"): x.abc() + default: panic(...) + } + } + } + +The structure below accomodates this: + + type typeInfo struct { + sfi []*structFieldInfo // sorted by encName + sfins // sorted by namespace + sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately. + sfip // unsorted + } + type structFieldInfo struct { + encName + nsEncName + ns string + attr bool + cdata bool + } + +indexForEncName is now an internal helper function that takes a sorted array +(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...) + +There will be a separate parser from the builder. +The parser will have a method: next() xmlToken method. + +xmlToken has fields: + - type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text + - value string + - ns string + +SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL + +The following are skipped when parsing: + - External Entities (from external file) + - Notation Declaration e.g. + - Entity Declarations & References + - XML Declaration (assume UTF-8) + - XML Directive i.e. + - Other Declarations: Notation, etc. + - Comment + - Processing Instruction + - schema / DTD for validation: + We are not a VALIDATING parser. Validation is done elsewhere. + However, some parts of the DTD internal subset are used (SEE BELOW). + For Attribute List Declarations e.g. + + We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED) + +The following XML features are supported + - Namespace + - Element + - Attribute + - cdata + - Unicode escape + +The following DTD (when as an internal sub-set) features are supported: + - Internal Entities e.g. + AND entities for the set: [<>&"'] + - Parameter entities e.g. + + +At decode time, a structure containing the following is kept + - namespace mapping + - default attribute values + - all internal entities (<>&"' and others written in the document) + +When decode starts, it parses XML namespace declarations and creates a map in the +xmlDecDriver. While parsing, that map continously gets updated. +The only problem happens when a namespace declaration happens on the node that it defines. +e.g. +To handle this, each Element must be fully parsed at a time, +even if it amounts to multiple tokens which are returned one at a time on request. + +xmlns is a special attribute name. + - It is used to define namespaces, including the default + - It is never returned as an AttrKey or AttrVal. + *We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.* + +Number, bool, null, mapKey, etc can all be decoded from any xmlToken. +This accomodates map[int]string for example. + +It should be possible to create a schema from the types, +or vice versa (generate types from schema with appropriate tags). +This is however out-of-scope from this parsing project. 
+ +We should write all namespace information at the first point that it is referenced in the tree, +and use the mapping for all child nodes and attributes. This means that state is maintained +at a point in the tree. This also means that calls to Decode or MustDecode will reset some state. + +When decoding, it is important to keep track of entity references and default attribute values. +It seems these can only be stored in the DTD components. We should honor them when decoding. + +Configuration for XMLHandle will look like this: + + XMLHandle + DefaultNS string + // Encoding: + NS map[string]string // ns URI to key, used for encoding + // Decoding: in case ENTITY declared in external schema or dtd, store info needed here + Entities map[string]string // map of entity rep to character + + +During encode, if a namespace mapping is not defined for a namespace found on a struct, +then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict +with any other namespace mapping). + +Note that different fields in a struct can have different namespaces. +However, all fields will default to the namespace on the _struct field (if defined). + +An XML document is a name, a map of attributes and a list of children. +Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example). +We have to "DecodeNaked" into something that resembles XML data. + +To support DecodeNaked (decode into nil interface{}) we have to define some "supporting" types: + type Name struct { // Prefered. Less allocations due to conversions. + Local string + Space string + } + type Element struct { + Name Name + Attrs map[Name]string + Children []interface{} // each child is either *Element or string + } +Only two "supporting" types are exposed for XML: Name and Element. + +We considered 'type Name string' where Name is like "Space Local" (space-separated). +We decided against it, because each creation of a name would lead to +double allocation (first convert []byte to string, then concatenate them into a string). +The benefit is that it is faster to read Attrs from a map. But given that Element is a value +object, we want to eschew methods and have public exposed variables. + +We also considered the following, where xml types were not value objects, and we used +intelligent accessor methods to extract information and for performance. +*** WE DECIDED AGAINST THIS. *** + type Attr struct { + Name Name + Value string + } + // Element is a ValueObject: There are no accessor methods. + // Make element self-contained. + type Element struct { + Name Name + attrsMap map[string]string // where key is "Space Local" + attrs []Attr + childrenT []string + childrenE []Element + childrenI []int // each child is a index into T or E. + } + func (x *Element) child(i) interface{} // returns string or *Element + +Per XML spec and our default handling, white space is insignificant between elements, +specifically between parent-child or siblings. White space occuring alone between start +and end element IS significant. However, if xml:space='preserve', then we 'preserve' +all whitespace. This is more critical when doing a DecodeNaked, but MAY not be as critical +when decoding into a typed value. + +**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.** +**So treat them as just "directives" that should be interpreted to mean something**. + +On encoding, we don't add any prettifying markup (indenting, etc). + +A document or element can only be encoded/decoded from/to a struct. 
In this mode: + - struct name maps to element name (or tag-info from _struct field) + - fields are mapped to child elements or attributes + +A map is either encoded as attributes on current element, or as a set of child elements. +Maps are encoded as attributes iff their keys and values are primitives (number, bool, string). + +A list is encoded as a set of child elements. + +Primitives (number, bool, string) are encoded as an element, attribute or text +depending on the context. + +Extensions must encode themselves as a text string. + +Encoding is tough, specifically when encoding mappings, because we need to encode +as either attribute or element. To do this, we need to default to encoding as attributes, +and then let Encoder inform the Handle when to start encoding as nodes. +i.e. Encoder does something like: + + h.EncodeMapStart() + h.Encode(), h.Encode(), ... + h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal + h.Encode(), h.Encode(), ... + h.EncodeEnd() + +Only XMLHandle understands this, and will set itself to start encoding as elements. + +This support extends to maps. For example, if a struct field is a map, and it has +the struct tag signifying it should be attr, then all its fields are encoded as attributes. +e.g. + + type X struct { + M map[string]int `codec:"m,attr"` // encode as attributes + } + +Question: + - if encoding a map, what if map keys have spaces in them??? + Then they cannot be attributes or child elements. Error. + +Misc: + + - For attribute values, normalize by trimming beginning and ending white space, + and converting every white space sequence to a single space. + - ATTLIST restrictions are enforced. + e.g. default value of xml:space, skipping xml:XYZ style attributes, etc. + - Consider supporting NON-STRICT mode (e.g. to handle HTML parsing). + Some elements e.g. br, hr, etc need not close and should be auto-closed + ... (see http://www.w3.org/TR/html4/loose.dtd) + An expansive set of entities are pre-defined. + - Have easy way to create a HTML parser: + add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose, + and add HTML Entities to the list. + - Support validating element/attribute XMLName before writing it. + Keep this behind a flag, which is set to false by default (for performance). + type XMLHandle struct { + CheckName bool + } + +ROADMAP (1 weeks): + - build encoder (1 day) + - build decoder (based off xmlParser) (1 day) + - implement xmlParser (2 days). + Look at encoding/xml for inspiration. + - integrate and TEST (1 days) + - write article and post it (1 day) + + +*/ + +// ----------- PARSER ------------------- + +type xmlTokenType uint8 + +const ( + _ xmlTokenType = iota << 1 + xmlTokenElemStart + xmlTokenElemEnd + xmlTokenAttrKey + xmlTokenAttrVal + xmlTokenText +) + +type xmlToken struct { + Type xmlTokenType + Value string + Namespace string // blank for AttrVal and Text +} + +type xmlParser struct { + r decReader + toks []xmlToken // list of tokens. + ptr int // ptr into the toks slice + done bool // nothing else to parse. r now returns EOF. +} + +func (x *xmlParser) next() (t *xmlToken) { + // once x.done, or x.ptr == len(x.toks) == 0, then return nil (to signify finish) + if !x.done && len(x.toks) == 0 { + x.nextTag() + } + // parses one element at a time (into possible many tokens) + if x.ptr < len(x.toks) { + t = &(x.toks[x.ptr]) + x.ptr++ + if x.ptr == len(x.toks) { + x.ptr = 0 + x.toks = x.toks[:0] + } + } + return +} + +// nextTag will parses the next element and fill up toks. 
+// It set done flag if/once EOF is reached. +func (x *xmlParser) nextTag() { + // TODO: implement. +} + +// ----------- ENCODER ------------------- + +type xmlEncDriver struct { + e *Encoder + w encWriter + h *XMLHandle + b [64]byte // scratch + bs []byte // scratch + // s jsonStack + noBuiltInTypes +} + +// ----------- DECODER ------------------- + +type xmlDecDriver struct { + d *Decoder + h *XMLHandle + r decReader // *bytesDecReader decReader + ct valueType // container type. one of unset, array or map. + bstr [8]byte // scratch used for string \UXXX parsing + b [64]byte // scratch + + // wsSkipped bool // whitespace skipped + + // s jsonStack + + noBuiltInTypes +} + +// DecodeNaked will decode into an XMLNode + +// XMLName is a value object representing a namespace-aware NAME +type XMLName struct { + Local string + Space string +} + +// XMLNode represents a "union" of the different types of XML Nodes. +// Only one of fields (Text or *Element) is set. +type XMLNode struct { + Element *Element + Text string +} + +// XMLElement is a value object representing an fully-parsed XML element. +type XMLElement struct { + Name Name + Attrs map[XMLName]string + // Children is a list of child nodes, each being a *XMLElement or string + Children []XMLNode +} + +// ----------- HANDLE ------------------- + +type XMLHandle struct { + BasicHandle + textEncodingType + + DefaultNS string + NS map[string]string // ns URI to key, for encoding + Entities map[string]string // entity representation to string, for encoding. +} + +func (h *XMLHandle) newEncDriver(e *Encoder) encDriver { + return &xmlEncDriver{e: e, w: e.w, h: h} +} + +func (h *XMLHandle) newDecDriver(d *Decoder) decDriver { + // d := xmlDecDriver{r: r.(*bytesDecReader), h: h} + hd := xmlDecDriver{d: d, r: d.r, h: h} + hd.n.bytes = d.b[:] + return &hd +} + +func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + +var _ decDriver = (*xmlDecDriver)(nil) +var _ encDriver = (*xmlEncDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/z.go b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/z.go new file mode 100644 index 00000000..b6ac0769 --- /dev/null +++ b/vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/z.go @@ -0,0 +1,23 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import "sort" + +// TODO: this is brittle, as it depends on z.go's init() being called last. +// The current build tools all honor that files are passed in lexical order. +// However, we should consider using an init_channel, +// that each person doing init will write to. 
+ +func init() { + if !useLookupRecognizedTypes { + return + } + sort.Sort(uintptrSlice(recognizedRtids)) + sort.Sort(uintptrSlice(recognizedRtidPtrs)) + recognizedRtidOrPtrs = make([]uintptr, len(recognizedRtids)+len(recognizedRtidPtrs)) + copy(recognizedRtidOrPtrs, recognizedRtids) + copy(recognizedRtidOrPtrs[len(recognizedRtids):], recognizedRtidPtrs) + sort.Sort(uintptrSlice(recognizedRtidOrPtrs)) +} diff --git a/vendor/github.com/coreos/etcd/version/LICENSE b/vendor/github.com/coreos/etcd/version/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/coreos/etcd/version/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go new file mode 100644 index 00000000..c55a8357 --- /dev/null +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package version implements etcd version parsing and contains latest version +// information. +package version + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" +) + +var ( + // MinClusterVersion is the min cluster version this etcd binary is compatible with. + MinClusterVersion = "3.0.0" + Version = "3.3.0+git" + APIVersion = "unknown" + + // Git SHA Value will be set during build + GitSHA = "Not provided (use ./build instead of go build)" +) + +func init() { + ver, err := semver.NewVersion(Version) + if err == nil { + APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) + } +} + +type Versions struct { + Server string `json:"etcdserver"` + Cluster string `json:"etcdcluster"` + // TODO: raft state machine version +} + +// Cluster only keeps the major.minor. +func Cluster(v string) string { + vs := strings.Split(v, ".") + if len(vs) <= 2 { + return v + } + return fmt.Sprintf("%s.%s", vs[0], vs[1]) +} diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 00000000..f21e5408 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 00000000..ced39cb8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 00000000..190bf0de --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,66 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. 
+type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." +func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 00000000..d9fd1b88 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). 
+func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. 
+ // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
+func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 00000000..cc7db4b2 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(0) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. 
+ return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 00000000..86e76a3d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. 
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. 
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. 
This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. 
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 00000000..7d8de145 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 00000000..9139e171 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go new file mode 100644 index 00000000..09436f31 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go new file mode 100644 index 00000000..bed9ebbe --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/decoder.go @@ -0,0 +1,729 @@ +package hcl + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" + "github.com/hashicorp/hcl/hcl/token" +) + +// This is the tag to use with structures to have settings for HCL +const tagName = "hcl" + +var ( + // nodeType holds a reference to the type of ast.Node + nodeType reflect.Type = findNodeType() +) + +// Unmarshal accepts a byte slice as input and writes the +// data to the value pointed to by v. +func Unmarshal(bs []byte, v interface{}) error { + root, err := parse(bs) + if err != nil { + return err + } + + return DecodeObject(v, root) +} + +// Decode reads the given input and decodes it into the structure +// given by `out`. +func Decode(out interface{}, in string) error { + obj, err := Parse(in) + if err != nil { + return err + } + + return DecodeObject(out, obj) +} + +// DecodeObject is a lower-level version of Decode. It decodes a +// raw Object into the given output. +func DecodeObject(out interface{}, n ast.Node) error { + val := reflect.ValueOf(out) + if val.Kind() != reflect.Ptr { + return errors.New("result must be a pointer") + } + + // If we have the file, we really decode the root node + if f, ok := n.(*ast.File); ok { + n = f.Node + } + + var d decoder + return d.decode("root", n, val.Elem()) +} + +type decoder struct { + stack []reflect.Kind +} + +func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { + k := result + + // If we have an interface with a valid value, we use that + // for the check. + if result.Kind() == reflect.Interface { + elem := result.Elem() + if elem.IsValid() { + k = elem + } + } + + // Push current onto stack unless it is an interface. 
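+ // The kinds recorded on this stack are what decodeInterface consults
+ // further below to decide whether an object should decode into a
+ // map[string]interface{} or into a []map[string]interface{}.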
+ if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float32, reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int, reflect.Int32, reflect.Int64: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain the value to enable deferred decoding. + // Very useful in situations where we want to preserve ast.Node information + // like Pos + if result.Type() == nodeType && result.CanSet() { + result.Set(reflect.ValueOf(node)) + return nil + } + + var set reflect.Value + redecode := true + + // For testing types, ObjectType should just be treated as a list. We + // set this to a temporary var because we want to pass in the real node. + testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. 
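+ // Concretely, the temporary value prepared below takes one of these
+ // two shapes (shown for illustration only):
+ //
+ //	map[string]interface{}{}    // at the root, or directly under a slice
+ //	[]map[string]interface{}{}  // nested anywhere else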
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) + set = result + } + case *ast.ObjectType: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 1) + set = result + } + case *ast.ListType: + var temp []interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 0) + set = result + case *ast.LiteralType: + switch n.Token.Type { + case token.BOOL: + var result bool + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.FLOAT: + var result float64 + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.NUMBER: + var result int + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.STRING, token.HEREDOC: + set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), + } + } + default: + return fmt.Errorf( + "%s: cannot decode into interface: %T", + name, node) + } + + // Set the result to what its supposed to be, then reset + // result so we don't reflect into this method anymore. + result.Set(set) + + if redecode { + // Revisit the node so that we can use the newly instantiated + // thing and populate it. + if err := d.decode(name, node, result); err != nil { + return err + } + } + + return nil +} + +func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { + if item, ok := node.(*ast.ObjectItem); ok { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + n, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), + } + } + + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + + resultType := result.Type() + resultElemType := resultType.Elem() + resultKeyType := resultType.Key() + if resultKeyType.Kind() != reflect.String { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Make a map if it is nil + resultMap := result + if result.IsNil() { + resultMap = reflect.MakeMap( + reflect.MapOf(resultKeyType, resultElemType)) + } + + // Go through each element and decode it. 
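+ // For example (hypothetical input, not part of this file), an object such as
+ //
+ //	foo = "bar"
+ //	baz = "qux"
+ //
+ // decoded into a map[string]string yields {"foo": "bar", "baz": "qux"}.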
+ done := make(map[string]struct{}) + for _, item := range n.Items { + if item.Val == nil { + continue + } + + // github.com/hashicorp/terraform/issue/5740 + if len(item.Keys) == 0 { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Get the key we're dealing with, which is the first item + keyStr := item.Keys[0].Token.Value().(string) + + // If we've already processed this key, then ignore it + if _, ok := done[keyStr]; ok { + continue + } + + // Determine the value. If we have more than one key, then we + // get the objectlist of only these keys. + itemVal := item.Val + if len(item.Keys) > 1 { + itemVal = n.Filter(keyStr) + done[keyStr] = struct{}{} + } + + // Make the field name + fieldName := fmt.Sprintf("%s.%s", name, keyStr) + + // Get the key/value as reflection values + key := reflect.ValueOf(keyStr) + val := reflect.Indirect(reflect.New(resultElemType)) + + // If we have a pre-existing value in the map, use that + oldVal := resultMap.MapIndex(key) + if oldVal.IsValid() { + val.Set(oldVal) + } + + // Decode! + if err := d.decode(fieldName, itemVal, val); err != nil { + return err + } + + // Set the value on the map + resultMap.SetMapIndex(key, val) + } + + // Set the final map if we can + set.Set(resultMap) + return nil +} + +func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + resultType := result.Type() + resultElemType := resultType.Elem() + val := reflect.New(resultElemType) + if err := d.decode(name, node, reflect.Indirect(val)); err != nil { + return err + } + + result.Set(val) + return nil +} + +func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + // Create the slice if it isn't nil + resultType := result.Type() + resultElemType := resultType.Elem() + if result.IsNil() { + resultSliceType := reflect.SliceOf(resultElemType) + result = reflect.MakeSlice( + resultSliceType, 0, 0) + } + + // Figure out the items we'll be copying into the slice + var items []ast.Node + switch n := node.(type) { + case *ast.ObjectList: + items = make([]ast.Node, len(n.Items)) + for i, item := range n.Items { + items[i] = item + } + case *ast.ObjectType: + items = []ast.Node{n} + case *ast.ListType: + items = n.List + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("unknown slice type: %T", node), + } + } + + for i, item := range items { + fieldName := fmt.Sprintf("%s[%d]", name, i) + + // Decode + val := reflect.Indirect(reflect.New(resultElemType)) + + // if item is an object that was decoded from ambiguous JSON and + // flattened, make sure it's expanded if it needs to decode into a + // defined structure. + item := expandObject(item, val) + + if err := d.decode(fieldName, item, val); err != nil { + return err + } + + // Append it onto the slice + result = reflect.Append(result, val) + } + + set.Set(result) + return nil +} + +// expandObject detects if an ambiguous JSON object was flattened to a List which +// should be decoded into a struct, and expands the ast to properly deocode. 
+func expandObject(node ast.Node, result reflect.Value) ast.Node { + item, ok := node.(*ast.ObjectItem) + if !ok { + return node + } + + elemType := result.Type() + + // our target type must be a struct + switch elemType.Kind() { + case reflect.Ptr: + switch elemType.Elem().Kind() { + case reflect.Struct: + //OK + default: + return node + } + case reflect.Struct: + //OK + default: + return node + } + + // A list value will have a key and field name. If it had more fields, + // it wouldn't have been flattened. + if len(item.Keys) != 2 { + return node + } + + keyToken := item.Keys[0].Token + item.Keys = item.Keys[1:] + + // we need to un-flatten the ast enough to decode + newNode := &ast.ObjectItem{ + Keys: []*ast.ObjectKey{ + &ast.ObjectKey{ + Token: keyToken, + }, + }, + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + + return newNode +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok && item != nil { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + + // Ignore fields with tag name "-" + if tagParts[0] == "-" { + continue + } + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. 
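+ // For example (illustrative types, not from this package):
+ //
+ //	type Base struct{ Name string `hcl:"name"` }
+ //	type Config struct{ Base `hcl:",squash"` }
+ //
+ // makes Base's fields decode as if they were declared directly on Config.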
+ squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for _, f := range fields { + field, fieldValue := f.field, f.val + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + fieldName := field.Name + + tagValue := field.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, fieldValue) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + fieldValue.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, fieldValue) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { + return err + } + } + + decodedFields = append(decodedFields, field.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 00000000..575a20b5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. 
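+//
+// Decoding directly into a struct looks roughly like this (an illustrative
+// sketch; the Config type and the input are hypothetical):
+//
+//	type Config struct {
+//		Name string `hcl:"name"`
+//	}
+//
+//	var c Config
+//	err := hcl.Decode(&c, `name = "example"`)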
+// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 00000000..6e5ef654 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,219 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. +type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. +func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. +func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // always returns the uninitiliazed position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. 
In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. It can be an object,list, number, bool or a + // string. If key length is larger than one, val can be only of type + // Object. + Val Node + + LeadComment *CommentGroup // associated lead comment + LineComment *CommentGroup // associated line comment +} + +func (o *ObjectItem) Pos() token.Pos { + // I'm not entirely sure what causes this, but removing this causes + // a test failure. We should investigate at some point. + if len(o.Keys) == 0 { + return token.Pos{} + } + + return o.Keys[0].Pos() +} + +// ObjectKeys are either an identifier or of type string. +type ObjectKey struct { + Token token.Token +} + +func (o *ObjectKey) Pos() token.Pos { + return o.Token.Pos +} + +// LiteralType represents a literal of basic type. Valid types are: +// token.NUMBER, token.FLOAT, token.BOOL and token.STRING +type LiteralType struct { + Token token.Token + + // comment types, only used when in a list + LeadComment *CommentGroup + LineComment *CommentGroup +} + +func (l *LiteralType) Pos() token.Pos { + return l.Token.Pos +} + +// ListStatement represents a HCL List type +type ListType struct { + Lbrack token.Pos // position of "[" + Rbrack token.Pos // position of "]" + List []Node // the elements in lexical order +} + +func (l *ListType) Pos() token.Pos { + return l.Lbrack +} + +func (l *ListType) Add(node Node) { + l.List = append(l.List, node) +} + +// ObjectType represents a HCL Object Type +type ObjectType struct { + Lbrace token.Pos // position of "{" + Rbrace token.Pos // position of "}" + List *ObjectList // the nodes in lexical order +} + +func (o *ObjectType) Pos() token.Pos { + return o.Lbrace +} + +// Comment node represents a single //, # style or /*- style commment +type Comment struct { + Start token.Pos // position of / or # + Text string +} + +func (c *Comment) Pos() token.Pos { + return c.Start +} + +// CommentGroup node represents a sequence of comments with no other tokens and +// no empty lines between. +type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } +func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 00000000..ba07ad42 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast + +import "fmt" + +// WalkFunc describes a function to be called for each node during a Walk. The +// returned node can be used to rewrite the AST. Walking stops the returned +// bool is false. +type WalkFunc func(Node) (Node, bool) + +// Walk traverses an AST in depth-first order: It starts by calling fn(node); +// node must not be nil. If fn returns true, Walk invokes fn recursively for +// each of the non-nil children of node, followed by a call of fn(nil). The +// returned node of fn can be used to rewrite the passed node to fn. 
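+//
+// A minimal illustrative use (hypothetical, not taken from this package):
+//
+//	ast.Walk(file.Node, func(n ast.Node) (ast.Node, bool) {
+//		if lit, ok := n.(*ast.LiteralType); ok {
+//			_ = lit // inspect or rewrite the literal here
+//		}
+//		return n, true // keep walking
+//	})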
+func Walk(node Node, fn WalkFunc) Node { + rewritten, ok := fn(node) + if !ok { + return rewritten + } + + switch n := node.(type) { + case *File: + n.Node = Walk(n.Node, fn) + case *ObjectList: + for i, item := range n.Items { + n.Items[i] = Walk(item, fn).(*ObjectItem) + } + case *ObjectKey: + // nothing to do + case *ObjectItem: + for i, k := range n.Keys { + n.Keys[i] = Walk(k, fn).(*ObjectKey) + } + + if n.Val != nil { + n.Val = Walk(n.Val, fn) + } + case *LiteralType: + // nothing to do + case *ListType: + for i, l := range n.List { + n.List[i] = Walk(l, fn) + } + case *ObjectType: + n.List = Walk(n.List, fn).(*ObjectList) + default: + // should we panic here? + fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go new file mode 100644 index 00000000..2380d71e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go @@ -0,0 +1,162 @@ +// Derivative work from: +// - https://golang.org/src/cmd/gofmt/gofmt.go +// - https://github.com/fatih/hclfmt + +package fmtcmd + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl/hcl/printer" +) + +var ( + ErrWriteStdin = errors.New("cannot use write option with standard input") +) + +type Options struct { + List bool // list files whose formatting differs + Write bool // write result to (source) file instead of stdout + Diff bool // display diffs of formatting changes +} + +func isValidFile(f os.FileInfo, extensions []string) bool { + if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") { + for _, ext := range extensions { + if strings.HasSuffix(f.Name(), "."+ext) { + return true + } + } + } + + return false +} + +// If in == nil, the source is the contents of the file with the given filename. 
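+// processFile is called by Run below for each input; calling Run from outside
+// the package looks roughly like this (an illustrative sketch, with a
+// hypothetical file name):
+//
+//	err := fmtcmd.Run(
+//		[]string{"main.hcl"},        // paths
+//		[]string{"hcl"},             // extensions
+//		os.Stdin, os.Stdout,
+//		fmtcmd.Options{Write: true},
+//	)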
+func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error { + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + in = f + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + res, err := printer.Format(src) + if err != nil { + return fmt.Errorf("In %s: %s", filename, err) + } + + if !bytes.Equal(src, res) { + // formatting has changed + if opts.List { + fmt.Fprintln(out, filename) + } + if opts.Write { + err = ioutil.WriteFile(filename, res, 0644) + if err != nil { + return err + } + } + if opts.Diff { + data, err := diff(src, res) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename) + out.Write(data) + } + } + + if !opts.List && !opts.Write && !opts.Diff { + _, err = out.Write(res) + } + + return err +} + +func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error { + visitFile := func(path string, f os.FileInfo, err error) error { + if err == nil && isValidFile(f, extensions) { + err = processFile(path, nil, stdout, false, opts) + } + return err + } + + return filepath.Walk(path, visitFile) +} + +func Run( + paths, extensions []string, + stdin io.Reader, + stdout io.Writer, + opts Options, +) error { + if len(paths) == 0 { + if opts.Write { + return ErrWriteStdin + } + if err := processFile("", stdin, stdout, true, opts); err != nil { + return err + } + return nil + } + + for _, path := range paths { + switch dir, err := os.Stat(path); { + case err != nil: + return err + case dir.IsDir(): + if err := walkDir(path, extensions, stdout, opts); err != nil { + return err + } + default: + if err := processFile(path, nil, stdout, false, opts); err != nil { + return err + } + } + } + + return nil +} + +func diff(b1, b2 []byte) (data []byte, err error) { + f1, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f1.Name()) + defer f1.Close() + + f2, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f2.Name()) + defer f2.Close() + + f1.Write(b1) + f2.Write(b2) + + data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + err = nil + } + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 00000000..5c99381d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. 
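+// Its Error method renders the position and the wrapped error as
+// "At <pos>: <err>" (see below).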
+type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 00000000..098e1bc4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,526 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + // normalize all line endings + // since the scanner and output only work with "\n" line endings, we may + // end up with dangling "\r" characters in the parsed data. + src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) + + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList(false) + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter" obj" tells this whether to we are within an object (braces: +// '{', '}') or just at the top level. If we're within an object, we end +// at an RBRACE. +func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // object lists can be optionally comma-delimited e.g. 
when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. + err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. + p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")), + } + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. + return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. 
this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. 
+ if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), + } + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.LBRACK: + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. 
+ comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go new file mode 100644 index 00000000..c896d584 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go @@ -0,0 +1,779 @@ +package printer + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/token" +) + +const ( + blank = byte(' ') + newline = byte('\n') + tab = byte('\t') + infinity = 1 << 30 // offset or line +) + +var ( + unindent = []byte("\uE123") // in the private use space +) + +type printer struct { + cfg Config + prev token.Pos + + comments []*ast.CommentGroup // may be nil, contains all comments + standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) + + enableTrace bool + indentTrace int +} + +type ByPosition []*ast.CommentGroup + +func (b ByPosition) Len() int { return len(b) } +func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } + +// collectComments comments all standalone comments which are not lead or line +// comment +func (p *printer) collectComments(node ast.Node) { + // first collect all comments. This is already stored in + // ast.File.(comments) + ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { + switch t := nn.(type) { + case *ast.File: + p.comments = t.Comments + return nn, false + } + return nn, true + }) + + standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) + for _, c := range p.comments { + standaloneComments[c.Pos()] = c + } + + // next remove all lead and line comments from the overall comment map. + // This will give us comments which are standalone, comments which are not + // assigned to any kind of node. 
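+ // For example (illustrative HCL, not from this file), in
+ //
+ //	// a standalone comment
+ //
+ //	foo = 1 // a line comment
+ //
+ // only the first comment remains standalone after this pass; the line
+ // comment is attached to the object item and removed from the set.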
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { + switch t := nn.(type) { + case *ast.LiteralType: + if t.LeadComment != nil { + for _, comment := range t.LeadComment.List { + if _, ok := standaloneComments[comment.Pos()]; ok { + delete(standaloneComments, comment.Pos()) + } + } + } + + if t.LineComment != nil { + for _, comment := range t.LineComment.List { + if _, ok := standaloneComments[comment.Pos()]; ok { + delete(standaloneComments, comment.Pos()) + } + } + } + case *ast.ObjectItem: + if t.LeadComment != nil { + for _, comment := range t.LeadComment.List { + if _, ok := standaloneComments[comment.Pos()]; ok { + delete(standaloneComments, comment.Pos()) + } + } + } + + if t.LineComment != nil { + for _, comment := range t.LineComment.List { + if _, ok := standaloneComments[comment.Pos()]; ok { + delete(standaloneComments, comment.Pos()) + } + } + } + } + + return nn, true + }) + + for _, c := range standaloneComments { + p.standaloneComments = append(p.standaloneComments, c) + } + + sort.Sort(ByPosition(p.standaloneComments)) +} + +// output prints creates b printable HCL output and returns it. +func (p *printer) output(n interface{}) []byte { + var buf bytes.Buffer + + switch t := n.(type) { + case *ast.File: + // File doesn't trace so we add the tracing here + defer un(trace(p, "File")) + return p.output(t.Node) + case *ast.ObjectList: + defer un(trace(p, "ObjectList")) + + var index int + for { + // Determine the location of the next actual non-comment + // item. If we're at the end, the next item is at "infinity" + var nextItem token.Pos + if index != len(t.Items) { + nextItem = t.Items[index].Pos() + } else { + nextItem = token.Pos{Offset: infinity, Line: infinity} + } + + // Go through the standalone comments in the file and print out + // the comments that we should be for this object item. + for _, c := range p.standaloneComments { + // Go through all the comments in the group. The group + // should be printed together, not separated by double newlines. + printed := false + newlinePrinted := false + for _, comment := range c.List { + // We only care about comments after the previous item + // we've printed so that comments are printed in the + // correct locations (between two objects for example). + // And before the next item. + if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { + // if we hit the end add newlines so we can print the comment + // we don't do this if prev is invalid which means the + // beginning of the file since the first comment should + // be at the first line. + if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { + buf.Write([]byte{newline, newline}) + newlinePrinted = true + } + + // Write the actual comment. + buf.WriteString(comment.Text) + buf.WriteByte(newline) + + // Set printed to true to note that we printed something + printed = true + } + } + + // If we're not at the last item, write a new line so + // that there is a newline separating this comment from + // the next object. + if printed && index != len(t.Items) { + buf.WriteByte(newline) + } + } + + if index == len(t.Items) { + break + } + + buf.Write(p.output(t.Items[index])) + if index != len(t.Items)-1 { + // Always write a newline to separate us from the next item + buf.WriteByte(newline) + + // Need to determine if we're going to separate the next item + // with a blank line. The logic here is simple, though there + // are a few conditions: + // + // 1. The next object is more than one line away anyways, + // so we need an empty line. + // + // 2. 
The next object is not a "single line" object, so + // we need an empty line. + // + // 3. This current object is not a single line object, + // so we need an empty line. + current := t.Items[index] + next := t.Items[index+1] + if next.Pos().Line != t.Items[index].Pos().Line+1 || + !p.isSingleLineObject(next) || + !p.isSingleLineObject(current) { + buf.WriteByte(newline) + } + } + index++ + } + case *ast.ObjectKey: + buf.WriteString(t.Token.Text) + case *ast.ObjectItem: + p.prev = t.Pos() + buf.Write(p.objectItem(t)) + case *ast.LiteralType: + buf.Write(p.literalType(t)) + case *ast.ListType: + buf.Write(p.list(t)) + case *ast.ObjectType: + buf.Write(p.objectType(t)) + default: + fmt.Printf(" unknown type: %T\n", n) + } + + return buf.Bytes() +} + +func (p *printer) literalType(lit *ast.LiteralType) []byte { + result := []byte(lit.Token.Text) + switch lit.Token.Type { + case token.HEREDOC: + // Clear the trailing newline from heredocs + if result[len(result)-1] == '\n' { + result = result[:len(result)-1] + } + + // Poison lines 2+ so that we don't indent them + result = p.heredocIndent(result) + case token.STRING: + // If this is a multiline string, poison lines 2+ so we don't + // indent them. + if bytes.IndexRune(result, '\n') >= 0 { + result = p.heredocIndent(result) + } + } + + return result +} + +// objectItem returns the printable HCL form of an object item. An object type +// starts with one/multiple keys and has a value. The value might be of any +// type. +func (p *printer) objectItem(o *ast.ObjectItem) []byte { + defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) + var buf bytes.Buffer + + if o.LeadComment != nil { + for _, comment := range o.LeadComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + for i, k := range o.Keys { + buf.WriteString(k.Token.Text) + buf.WriteByte(blank) + + // reach end of key + if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { + buf.WriteString("=") + buf.WriteByte(blank) + } + } + + buf.Write(p.output(o.Val)) + + if o.Val.Pos().Line == o.Keys[0].Pos().Line && o.LineComment != nil { + buf.WriteByte(blank) + for _, comment := range o.LineComment.List { + buf.WriteString(comment.Text) + } + } + + return buf.Bytes() +} + +// objectType returns the printable HCL form of an object type. An object type +// begins with a brace and ends with a brace. +func (p *printer) objectType(o *ast.ObjectType) []byte { + defer un(trace(p, "ObjectType")) + var buf bytes.Buffer + buf.WriteString("{") + + var index int + var nextItem token.Pos + var commented, newlinePrinted bool + for { + // Determine the location of the next actual non-comment + // item. If we're at the end, the next item is the closing brace + if index != len(o.List.Items) { + nextItem = o.List.Items[index].Pos() + } else { + nextItem = o.Rbrace + } + + // Go through the standalone comments in the file and print out + // the comments that we should be for this object item. + for _, c := range p.standaloneComments { + printed := false + var lastCommentPos token.Pos + for _, comment := range c.List { + // We only care about comments after the previous item + // we've printed so that comments are printed in the + // correct locations (between two objects for example). + // And before the next item. + if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { + // If there are standalone comments and the initial newline has not + // been printed yet, do it now. 
+ if !newlinePrinted { + newlinePrinted = true + buf.WriteByte(newline) + } + + // add newline if it's between other printed nodes + if index > 0 { + commented = true + buf.WriteByte(newline) + } + + // Store this position + lastCommentPos = comment.Pos() + + // output the comment itself + buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) + + // Set printed to true to note that we printed something + printed = true + + /* + if index != len(o.List.Items) { + buf.WriteByte(newline) // do not print on the end + } + */ + } + } + + // Stuff to do if we had comments + if printed { + // Always write a newline + buf.WriteByte(newline) + + // If there is another item in the object and our comment + // didn't hug it directly, then make sure there is a blank + // line separating them. + if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { + buf.WriteByte(newline) + } + } + } + + if index == len(o.List.Items) { + p.prev = o.Rbrace + break + } + + // At this point we are sure that it's not a totally empty block: print + // the initial newline if it hasn't been printed yet by the previous + // block about standalone comments. + if !newlinePrinted { + buf.WriteByte(newline) + newlinePrinted = true + } + + // check if we have adjacent one liner items. If yes we'll going to align + // the comments. + var aligned []*ast.ObjectItem + for _, item := range o.List.Items[index:] { + // we don't group one line lists + if len(o.List.Items) == 1 { + break + } + + // one means a oneliner with out any lead comment + // two means a oneliner with lead comment + // anything else might be something else + cur := lines(string(p.objectItem(item))) + if cur > 2 { + break + } + + curPos := item.Pos() + + nextPos := token.Pos{} + if index != len(o.List.Items)-1 { + nextPos = o.List.Items[index+1].Pos() + } + + prevPos := token.Pos{} + if index != 0 { + prevPos = o.List.Items[index-1].Pos() + } + + // fmt.Println("DEBUG ----------------") + // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) + // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) + // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) + + if curPos.Line+1 == nextPos.Line { + aligned = append(aligned, item) + index++ + continue + } + + if curPos.Line-1 == prevPos.Line { + aligned = append(aligned, item) + index++ + + // finish if we have a new line or comment next. This happens + // if the next item is not adjacent + if curPos.Line+1 != nextPos.Line { + break + } + continue + } + + break + } + + // put newlines if the items are between other non aligned items. 
+ // newlines are also added if there is a standalone comment already, so + // check it too + if !commented && index != len(aligned) { + buf.WriteByte(newline) + } + + if len(aligned) >= 1 { + p.prev = aligned[len(aligned)-1].Pos() + + items := p.alignedItems(aligned) + buf.Write(p.indent(items)) + } else { + p.prev = o.List.Items[index].Pos() + + buf.Write(p.indent(p.objectItem(o.List.Items[index]))) + index++ + } + + buf.WriteByte(newline) + } + + buf.WriteString("}") + return buf.Bytes() +} + +func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { + var buf bytes.Buffer + + // find the longest key and value length, needed for alignment + var longestKeyLen int // longest key length + var longestValLen int // longest value length + for _, item := range items { + key := len(item.Keys[0].Token.Text) + val := len(p.output(item.Val)) + + if key > longestKeyLen { + longestKeyLen = key + } + + if val > longestValLen { + longestValLen = val + } + } + + for i, item := range items { + if item.LeadComment != nil { + for _, comment := range item.LeadComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + for i, k := range item.Keys { + keyLen := len(k.Token.Text) + buf.WriteString(k.Token.Text) + for i := 0; i < longestKeyLen-keyLen+1; i++ { + buf.WriteByte(blank) + } + + // reach end of key + if i == len(item.Keys)-1 && len(item.Keys) == 1 { + buf.WriteString("=") + buf.WriteByte(blank) + } + } + + val := p.output(item.Val) + valLen := len(val) + buf.Write(val) + + if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { + for i := 0; i < longestValLen-valLen+1; i++ { + buf.WriteByte(blank) + } + + for _, comment := range item.LineComment.List { + buf.WriteString(comment.Text) + } + } + + // do not print for the last item + if i != len(items)-1 { + buf.WriteByte(newline) + } + } + + return buf.Bytes() +} + +// list returns the printable HCL form of an list type. +func (p *printer) list(l *ast.ListType) []byte { + var buf bytes.Buffer + buf.WriteString("[") + + var longestLine int + for _, item := range l.List { + // for now we assume that the list only contains literal types + if lit, ok := item.(*ast.LiteralType); ok { + lineLen := len(lit.Token.Text) + if lineLen > longestLine { + longestLine = lineLen + } + } + } + + insertSpaceBeforeItem := false + lastHadLeadComment := false + for i, item := range l.List { + // Keep track of whether this item is a heredoc since that has + // unique behavior. + heredoc := false + if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { + heredoc = true + } + + if item.Pos().Line != l.Lbrack.Line { + // multiline list, add newline before we add each item + buf.WriteByte(newline) + insertSpaceBeforeItem = false + + // If we have a lead comment, then we want to write that first + leadComment := false + if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { + leadComment = true + + // If this isn't the first item and the previous element + // didn't have a lead comment, then we need to add an extra + // newline to properly space things out. If it did have a + // lead comment previously then this would be done + // automatically. 
+ if i > 0 && !lastHadLeadComment { + buf.WriteByte(newline) + } + + for _, comment := range lit.LeadComment.List { + buf.Write(p.indent([]byte(comment.Text))) + buf.WriteByte(newline) + } + } + + // also indent each line + val := p.output(item) + curLen := len(val) + buf.Write(p.indent(val)) + + // if this item is a heredoc, then we output the comma on + // the next line. This is the only case this happens. + comma := []byte{','} + if heredoc { + buf.WriteByte(newline) + comma = p.indent(comma) + } + + buf.Write(comma) + + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) + } + + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) + } + } + + lastItem := i == len(l.List)-1 + if lastItem { + buf.WriteByte(newline) + } + + if leadComment && !lastItem { + buf.WriteByte(newline) + } + + lastHadLeadComment = leadComment + } else { + if insertSpaceBeforeItem { + buf.WriteByte(blank) + insertSpaceBeforeItem = false + } + + // Output the item itself + // also indent each line + val := p.output(item) + curLen := len(val) + buf.Write(val) + + // If this is a heredoc item we always have to output a newline + // so that it parses properly. + if heredoc { + buf.WriteByte(newline) + } + + // If this isn't the last element, write a comma. + if i != len(l.List)-1 { + buf.WriteString(",") + insertSpaceBeforeItem = true + } + + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) + } + + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) + } + } + } + + } + + buf.WriteString("]") + return buf.Bytes() +} + +// indent indents the lines of the given buffer for each non-empty line +func (p *printer) indent(buf []byte) []byte { + var prefix []byte + if p.cfg.SpacesWidth != 0 { + for i := 0; i < p.cfg.SpacesWidth; i++ { + prefix = append(prefix, blank) + } + } else { + prefix = []byte{tab} + } + + var res []byte + bol := true + for _, c := range buf { + if bol && c != '\n' { + res = append(res, prefix...) + } + + res = append(res, c) + bol = c == '\n' + } + return res +} + +// unindent removes all the indentation from the tombstoned lines +func (p *printer) unindent(buf []byte) []byte { + var res []byte + for i := 0; i < len(buf); i++ { + skip := len(buf)-i <= len(unindent) + if !skip { + skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) + } + if skip { + res = append(res, buf[i]) + continue + } + + // We have a marker. we have to backtrace here and clean out + // any whitespace ahead of our tombstone up to a \n + for j := len(res) - 1; j >= 0; j-- { + if res[j] == '\n' { + break + } + + res = res[:j] + } + + // Skip the entire unindent marker + i += len(unindent) - 1 + } + + return res +} + +// heredocIndent marks all the 2nd and further lines as unindentable +func (p *printer) heredocIndent(buf []byte) []byte { + var res []byte + bol := false + for _, c := range buf { + if bol && c != '\n' { + res = append(res, unindent...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// isSingleLineObject tells whether the given object item is a single +// line object such as "obj {}". 
+// +// A single line object: +// +// * has no lead comments (hence multi-line) +// * has no assignment +// * has no values in the stanza (within {}) +// +func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { + // If there is a lead comment, can't be one line + if val.LeadComment != nil { + return false + } + + // If there is assignment, we always break by line + if val.Assign.IsValid() { + return false + } + + // If it isn't an object type, then its not a single line object + ot, ok := val.Val.(*ast.ObjectType) + if !ok { + return false + } + + // If the object has no items, it is single line! + return len(ot.List.Items) == 0 +} + +func lines(txt string) int { + endline := 1 + for i := 0; i < len(txt); i++ { + if txt[i] == '\n' { + endline++ + } + } + return endline +} + +// ---------------------------------------------------------------------------- +// Tracing support + +func (p *printer) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + i := 2 * p.indentTrace + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *printer, msg string) *printer { + p.printTrace(msg, "(") + p.indentTrace++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *printer) { + p.indentTrace-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go new file mode 100644 index 00000000..6617ab8e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go @@ -0,0 +1,66 @@ +// Package printer implements printing of AST nodes to HCL format. +package printer + +import ( + "bytes" + "io" + "text/tabwriter" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" +) + +var DefaultConfig = Config{ + SpacesWidth: 2, +} + +// A Config node controls the output of Fprint. +type Config struct { + SpacesWidth int // if set, it will use spaces instead of tabs for alignment +} + +func (c *Config) Fprint(output io.Writer, node ast.Node) error { + p := &printer{ + cfg: *c, + comments: make([]*ast.CommentGroup, 0), + standaloneComments: make([]*ast.CommentGroup, 0), + // enableTrace: true, + } + + p.collectComments(node) + + if _, err := output.Write(p.unindent(p.output(node))); err != nil { + return err + } + + // flush tabwriter, if any + var err error + if tw, _ := output.(*tabwriter.Writer); tw != nil { + err = tw.Flush() + } + + return err +} + +// Fprint "pretty-prints" an HCL node to output +// It calls Config.Fprint with default settings. +func Fprint(output io.Writer, node ast.Node) error { + return DefaultConfig.Fprint(output, node) +} + +// Format formats src HCL and returns the result. +func Format(src []byte) ([]byte, error) { + node, err := parser.Parse(src) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + if err := DefaultConfig.Fprint(&buf, node); err != nil { + return nil, err + } + + // Add trailing newline to result + buf.WriteString("\n") + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 00000000..6601ef76 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -0,0 +1,651 @@ +// Package scanner implements a scanner for HCL (HashiCorp Configuration +// Language) source text. 
+package scanner + +import ( + "bytes" + "fmt" + "os" + "regexp" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/hcl/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // If we see a null character with data left, then that is an error + if ch == '\x00' && s.buf.Len() > 0 { + s.err("unexpected null character (0x00)") + return eof + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
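+//
+// A minimal usage sketch (assuming src holds HCL source as a []byte):
+//
+//	s := scanner.New(src)
+//	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
+//		fmt.Println(tok.Type, tok.Text)
+//	}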
+func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. 
If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa beginning from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { + break + } + + // Not an anchor match, record the start of a new line + lineStart = s.srcPos.Offset + } + + if ch == eof { + s.err("heredoc not terminated") + return + } + } + + return +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + start := n + for n > 0 && digitVal(ch) < base { + ch = s.next() + if ch == eof { + // If we see an EOF, we halt any more scanning of digits + // immediately. + break + } + + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + if n != start { + // we scanned all digits, put the last non digit char back, + // only if we read anything at all + s.unread() + } + + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 00000000..5f981eaa --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,241 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the 
right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + if s[0] == '\n' { + return "", ErrSyntax + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. 
+func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 00000000..59c1bb72 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
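+//
+// For example, with illustrative values, a position at offset 12 on line 3
+// is After a position at offset 4 on line 1.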
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 00000000..e37c0664 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! 
We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 00000000..125a5f07 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,313 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. 
If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + pos := p.tok.Pos + o.Assign = hcltoken.Pos{ + Filename: pos.Filename, + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // If we have a zero keycount it means that we never got + // an object key, i.e. `{ :`. This is a syntax error. + if keyCount == 0 { + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + + // Done + return keys, nil + case token.ILLEGAL: + return nil, errors.New("illegal") + default: + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? 
not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 00000000..fe3f0f09 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. 
+ b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. +func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } else if lit == "null" { + tok = token.NULL + } else { + s.err("illegal char") + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case ':': + tok = token.COLON + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + s.err("illegal char") + } + default: + s.err("illegal char: " + string(ch)) + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +// scanNumber scans a HCL number definition starting with the given 
rune +func (s *Scanner) scanNumber(ch rune) token.Type { + zero := ch == '0' + pos := s.srcPos + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + + // If we have a larger number and this is zero, error + if zero && pos != s.srcPos { + s.err("numbers cannot start with 0") + } + + return token.NUMBER +} + +// scanMantissa scans the mantissa beginning from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if ch == '\n' || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + // we scanned all digits, put the last non digit char back + s.unread() + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isHexadecimal returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isHexadecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go new file mode 100644 index 00000000..59c1bb72 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. 
+type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 00000000..95a0c3ee --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. 
+func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 00000000..d9993c29 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go @@ -0,0 +1,38 @@ +package hcl + +import ( + "unicode" + "unicode/utf8" +) + +type lexModeValue byte + +const ( + lexModeUnknown lexModeValue = iota + lexModeHcl + lexModeJson +) + +// lexMode returns whether we're going to be parsing in JSON +// mode or HCL mode. +func lexMode(v []byte) lexModeValue { + var ( + r rune + w int + offset int + ) + + for { + r, w = utf8.DecodeRune(v[offset:]) + offset += w + if unicode.IsSpace(r) { + continue + } + if r == '{' { + return lexModeJson + } + break + } + + return lexModeHcl +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 00000000..1fca53c4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -0,0 +1,39 @@ +package hcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" +) + +// ParseBytes accepts as input byte slice and returns ast tree. +// +// Input can be either JSON or HCL +func ParseBytes(in []byte) (*ast.File, error) { + return parse(in) +} + +// ParseString accepts input as a string and returns ast tree. +func ParseString(input string) (*ast.File, error) { + return parse([]byte(input)) +} + +func parse(in []byte) (*ast.File, error) { + switch lexMode(in) { + case lexModeHcl: + return hclParser.Parse(in) + case lexModeJson: + return jsonParser.Parse(in) + } + + return nil, fmt.Errorf("unknown config format") +} + +// Parse parses the given input and returns the root object. +// +// The input format can be either HCL or JSON. +func Parse(input string) (*ast.File, error) { + return parse([]byte(input)) +} diff --git a/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go b/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go new file mode 100644 index 00000000..827ac6f1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go @@ -0,0 +1,15 @@ +package testhelper + +import ( + "runtime" + "strings" +) + +// Converts the line endings when on Windows +func Unix2dos(unix string) string { + if runtime.GOOS != "windows" { + return unix + } + + return strings.Replace(unix, "\n", "\r\n", -1) +} diff --git a/vendor/github.com/kr/fs/LICENSE b/vendor/github.com/kr/fs/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/vendor/github.com/kr/fs/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kr/fs/filesystem.go b/vendor/github.com/kr/fs/filesystem.go new file mode 100644 index 00000000..f1c4805f --- /dev/null +++ b/vendor/github.com/kr/fs/filesystem.go @@ -0,0 +1,36 @@ +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// FileSystem defines the methods of an abstract filesystem. +type FileSystem interface { + + // ReadDir reads the directory named by dirname and returns a + // list of directory entries. + ReadDir(dirname string) ([]os.FileInfo, error) + + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. + Lstat(name string) (os.FileInfo, error) + + // Join joins any number of path elements into a single path, adding a + // separator if necessary. The result is Cleaned; in particular, all + // empty strings are ignored. + // + // The separator is FileSystem specific. + Join(elem ...string) string +} + +// fs represents a FileSystem provided by the os package. +type fs struct{} + +func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) } + +func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) } diff --git a/vendor/github.com/kr/fs/walk.go b/vendor/github.com/kr/fs/walk.go new file mode 100644 index 00000000..6ffa1e0b --- /dev/null +++ b/vendor/github.com/kr/fs/walk.go @@ -0,0 +1,95 @@ +// Package fs provides filesystem-related functions. +package fs + +import ( + "os" +) + +// Walker provides a convenient interface for iterating over the +// descendants of a filesystem path. +// Successive calls to the Step method will step through each +// file or directory in the tree, including the root. The files +// are walked in lexical order, which makes the output deterministic +// but means that for very large directories Walker can be inefficient. +// Walker does not follow symbolic links. +type Walker struct { + fs FileSystem + cur item + stack []item + descend bool +} + +type item struct { + path string + info os.FileInfo + err error +} + +// Walk returns a new Walker rooted at root. +func Walk(root string) *Walker { + return WalkFS(root, new(fs)) +} + +// WalkFS returns a new Walker rooted at root on the FileSystem fs. 
+func WalkFS(root string, fs FileSystem) *Walker { + info, err := fs.Lstat(root) + return &Walker{ + fs: fs, + stack: []item{{root, info, err}}, + } +} + +// Step advances the Walker to the next file or directory, +// which will then be available through the Path, Stat, +// and Err methods. +// It returns false when the walk stops at the end of the tree. +func (w *Walker) Step() bool { + if w.descend && w.cur.err == nil && w.cur.info.IsDir() { + list, err := w.fs.ReadDir(w.cur.path) + if err != nil { + w.cur.err = err + w.stack = append(w.stack, w.cur) + } else { + for i := len(list) - 1; i >= 0; i-- { + path := w.fs.Join(w.cur.path, list[i].Name()) + w.stack = append(w.stack, item{path, list[i], nil}) + } + } + } + + if len(w.stack) == 0 { + return false + } + i := len(w.stack) - 1 + w.cur = w.stack[i] + w.stack = w.stack[:i] + w.descend = true + return true +} + +// Path returns the path to the most recent file or directory +// visited by a call to Step. It contains the argument to Walk +// as a prefix; that is, if Walk is called with "dir", which is +// a directory containing the file "a", Path will return "dir/a". +func (w *Walker) Path() string { + return w.cur.path +} + +// Stat returns info for the most recent file or directory +// visited by a call to Step. +func (w *Walker) Stat() os.FileInfo { + return w.cur.info +} + +// Err returns the error, if any, for the most recent attempt +// by Step to visit a file or directory. If a directory has +// an error, w will not descend into that directory. +func (w *Walker) Err() error { + return w.cur.err +} + +// SkipDir causes the currently visited directory to be skipped. +// If w is not on a directory, SkipDir has no effect. +func (w *Walker) SkipDir() { + w.descend = false +} diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE new file mode 100644 index 00000000..b387087c --- /dev/null +++ b/vendor/github.com/magiconair/properties/LICENSE @@ -0,0 +1,25 @@ +goproperties - properties file decoder for Go + +Copyright (c) 2013-2018 - Frank Schroeder + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
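For orientation, the kr/fs Walker vendored above is driven by calling Step in a loop and inspecting Path, Stat and Err on each iteration. A minimal usage sketch follows; the root path "." and the skipped directory name "vendor" are illustrative choices, not taken from this patch:

package main

import (
	"fmt"
	"log"

	"github.com/kr/fs"
)

func main() {
	walker := fs.Walk(".") // walk the current directory, including the root itself
	for walker.Step() {
		if err := walker.Err(); err != nil {
			log.Println(err) // report the error and move on to the next entry
			continue
		}
		info := walker.Stat()
		if info.IsDir() && info.Name() == "vendor" {
			walker.SkipDir() // do not descend into directories named "vendor"
			continue
		}
		fmt.Println(walker.Path())
	}
}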
diff --git a/vendor/github.com/magiconair/properties/assert/assert.go b/vendor/github.com/magiconair/properties/assert/assert.go new file mode 100644 index 00000000..cb1097ba --- /dev/null +++ b/vendor/github.com/magiconair/properties/assert/assert.go @@ -0,0 +1,90 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package assert provides helper functions for testing. +package assert + +import ( + "fmt" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strings" + "testing" +) + +// skip defines the default call depth +const skip = 2 + +// Equal asserts that got and want are equal as defined by +// reflect.DeepEqual. The test fails with msg if they are not equal. +func Equal(t *testing.T, got, want interface{}, msg ...string) { + if x := equal(2, got, want, msg...); x != "" { + fmt.Println(x) + t.Fail() + } +} + +func equal(skip int, got, want interface{}, msg ...string) string { + if !reflect.DeepEqual(got, want) { + return fail(skip, "got %v want %v %s", got, want, strings.Join(msg, " ")) + } + return "" +} + +// Panic asserts that function fn() panics. +// It assumes that recover() either returns a string or +// an error and fails if the message does not match +// the regular expression in 'matches'. +func Panic(t *testing.T, fn func(), matches string) { + if x := doesPanic(2, fn, matches); x != "" { + fmt.Println(x) + t.Fail() + } +} + +func doesPanic(skip int, fn func(), expr string) (err string) { + defer func() { + r := recover() + if r == nil { + err = fail(skip, "did not panic") + return + } + var v string + switch r.(type) { + case error: + v = r.(error).Error() + case string: + v = r.(string) + } + err = matches(skip, v, expr) + }() + fn() + return "" +} + +// Matches asserts that a value matches a given regular expression. +func Matches(t *testing.T, value, expr string) { + if x := matches(2, value, expr); x != "" { + fmt.Println(x) + t.Fail() + } +} + +func matches(skip int, value, expr string) string { + ok, err := regexp.MatchString(expr, value) + if err != nil { + return fail(skip, "invalid pattern %q. %s", expr, err) + } + if !ok { + return fail(skip, "got %s which does not match %s", value, expr) + } + return "" +} + +func fail(skip int, format string, args ...interface{}) string { + _, file, line, _ := runtime.Caller(skip) + return fmt.Sprintf("\t%s:%d: %s\n", filepath.Base(file), line, fmt.Sprintf(format, args...)) +} diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go new file mode 100644 index 00000000..0a961bb0 --- /dev/null +++ b/vendor/github.com/magiconair/properties/decode.go @@ -0,0 +1,289 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Decode assigns property values to exported fields of a struct. +// +// Decode traverses v recursively and returns an error if a value cannot be +// converted to the field type or a required value is missing for a field. +// +// The following type dependent decodings are used: +// +// String, boolean, numeric fields have the value of the property key assigned. +// The property key name is the name of the field. A different key and a default +// value can be set in the field's tag. Fields without default value are +// required. 
If the value cannot be converted to the field type an error is +// returned. +// +// time.Duration fields have the result of time.ParseDuration() assigned. +// +// time.Time fields have the vaule of time.Parse() assigned. The default layout +// is time.RFC3339 but can be set in the field's tag. +// +// Arrays and slices of string, boolean, numeric, time.Duration and time.Time +// fields have the value interpreted as a comma separated list of values. The +// individual values are trimmed of whitespace and empty values are ignored. A +// default value can be provided as a semicolon separated list in the field's +// tag. +// +// Struct fields are decoded recursively using the field name plus "." as +// prefix. The prefix (without dot) can be overridden in the field's tag. +// Default values are not supported in the field's tag. Specify them on the +// fields of the inner struct instead. +// +// Map fields must have a key of type string and are decoded recursively by +// using the field's name plus ".' as prefix and the next element of the key +// name as map key. The prefix (without dot) can be overridden in the field's +// tag. Default values are not supported. +// +// Examples: +// +// // Field is ignored. +// Field int `properties:"-"` +// +// // Field is assigned value of 'Field'. +// Field int +// +// // Field is assigned value of 'myName'. +// Field int `properties:"myName"` +// +// // Field is assigned value of key 'myName' and has a default +// // value 15 if the key does not exist. +// Field int `properties:"myName,default=15"` +// +// // Field is assigned value of key 'Field' and has a default +// // value 15 if the key does not exist. +// Field int `properties:",default=15"` +// +// // Field is assigned value of key 'date' and the date +// // is in format 2006-01-02 +// Field time.Time `properties:"date,layout=2006-01-02"` +// +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas. +// Field []string +// +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas and has a default +// // value ["a", "b", "c"] if the key does not exist. +// Field []string `properties:",default=a;b;c"` +// +// // Field is decoded recursively with "Field." as key prefix. +// Field SomeStruct +// +// // Field is decoded recursively with "myName." as key prefix. +// Field SomeStruct `properties:"myName"` +// +// // Field is decoded recursively with "Field." as key prefix +// // and the next dotted element of the key as map key. +// Field map[string]string +// +// // Field is decoded recursively with "myName." as key prefix +// // and the next dotted element of the key as map key. +// Field map[string]string `properties:"myName"` +func (p *Properties) Decode(x interface{}) error { + t, v := reflect.TypeOf(x), reflect.ValueOf(x) + if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { + return fmt.Errorf("not a pointer to struct: %s", t) + } + if err := dec(p, "", nil, nil, v); err != nil { + return err + } + return nil +} + +func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { + t := v.Type() + + // value returns the property value for key or the default if provided. + value := func() (string, error) { + if val, ok := p.Get(key); ok { + return val, nil + } + if def != nil { + return *def, nil + } + return "", fmt.Errorf("missing required key %s", key) + } + + // conv converts a string to a value of the given type. 
+ conv := func(s string, t reflect.Type) (val reflect.Value, err error) { + var v interface{} + + switch { + case isDuration(t): + v, err = time.ParseDuration(s) + + case isTime(t): + layout := opts["layout"] + if layout == "" { + layout = time.RFC3339 + } + v, err = time.Parse(layout, s) + + case isBool(t): + v, err = boolVal(s), nil + + case isString(t): + v, err = s, nil + + case isFloat(t): + v, err = strconv.ParseFloat(s, 64) + + case isInt(t): + v, err = strconv.ParseInt(s, 10, 64) + + case isUint(t): + v, err = strconv.ParseUint(s, 10, 64) + + default: + return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) + } + if err != nil { + return reflect.Zero(t), err + } + return reflect.ValueOf(v).Convert(t), nil + } + + // keydef returns the property key and the default value based on the + // name of the struct field and the options in the tag. + keydef := func(f reflect.StructField) (string, *string, map[string]string) { + _key, _opts := parseTag(f.Tag.Get("properties")) + + var _def *string + if d, ok := _opts["default"]; ok { + _def = &d + } + if _key != "" { + return _key, _def, _opts + } + return f.Name, _def, _opts + } + + switch { + case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): + s, err := value() + if err != nil { + return err + } + val, err := conv(s, t) + if err != nil { + return err + } + v.Set(val) + + case isPtr(t): + return dec(p, key, def, opts, v.Elem()) + + case isStruct(t): + for i := 0; i < v.NumField(); i++ { + fv := v.Field(i) + fk, def, opts := keydef(t.Field(i)) + if !fv.CanSet() { + return fmt.Errorf("cannot set %s", t.Field(i).Name) + } + if fk == "-" { + continue + } + if key != "" { + fk = key + "." + fk + } + if err := dec(p, fk, def, opts, fv); err != nil { + return err + } + } + return nil + + case isArray(t): + val, err := value() + if err != nil { + return err + } + vals := split(val, ";") + a := reflect.MakeSlice(t, 0, len(vals)) + for _, s := range vals { + val, err := conv(s, t.Elem()) + if err != nil { + return err + } + a = reflect.Append(a, val) + } + v.Set(a) + + case isMap(t): + valT := t.Elem() + m := reflect.MakeMap(t) + for postfix := range p.FilterStripPrefix(key + ".").m { + pp := strings.SplitN(postfix, ".", 2) + mk, mv := pp[0], reflect.New(valT) + if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { + return err + } + m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) + } + v.Set(m) + + default: + return fmt.Errorf("unsupported type %s", t) + } + return nil +} + +// split splits a string on sep, trims whitespace of elements +// and omits empty elements +func split(s string, sep string) []string { + var a []string + for _, v := range strings.Split(s, sep) { + if v = strings.TrimSpace(v); v != "" { + a = append(a, v) + } + } + return a +} + +// parseTag parses a "key,k=v,k=v,..." 
+func parseTag(tag string) (key string, opts map[string]string) { + opts = map[string]string{} + for i, s := range strings.Split(tag, ",") { + if i == 0 { + key = s + continue + } + + pp := strings.SplitN(s, "=", 2) + if len(pp) == 1 { + opts[pp[0]] = "" + } else { + opts[pp[0]] = pp[1] + } + } + return key, opts +} + +func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } +func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } +func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } +func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } +func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } +func isString(t reflect.Type) bool { return t.Kind() == reflect.String } +func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } +func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } +func isFloat(t reflect.Type) bool { + return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 +} +func isInt(t reflect.Type) bool { + return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 +} +func isUint(t reflect.Type) bool { + return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 +} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go new file mode 100644 index 00000000..36c83680 --- /dev/null +++ b/vendor/github.com/magiconair/properties/doc.go @@ -0,0 +1,156 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package properties provides functions for reading and writing +// ISO-8859-1 and UTF-8 encoded .properties files and has +// support for recursive property expansion. +// +// Java properties files are ISO-8859-1 encoded and use Unicode +// literals for characters outside the ISO character set. Unicode +// literals can be used in UTF-8 encoded properties files but +// aren't necessary. +// +// To load a single properties file use MustLoadFile(): +// +// p := properties.MustLoadFile(filename, properties.UTF8) +// +// To load multiple properties files use MustLoadFiles() +// which loads the files in the given order and merges the +// result. Missing properties files can be ignored if the +// 'ignoreMissing' flag is set to true. +// +// Filenames can contain environment variables which are expanded +// before loading. +// +// f1 := "/etc/myapp/myapp.conf" +// f2 := "/home/${USER}/myapp.conf" +// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) +// +// All of the different key/value delimiters ' ', ':' and '=' are +// supported as well as the comment characters '!' and '#' and +// multi-line values. +// +// ! this is a comment +// # and so is this +// +// # the following expressions are equal +// key value +// key=value +// key:value +// key = value +// key : value +// key = val\ +// ue +// +// Properties stores all comments preceding a key and provides +// GetComments() and SetComments() methods to retrieve and +// update them. The convenience functions GetComment() and +// SetComment() allow access to the last comment. The +// WriteComment() method writes properties files including +// the comments and with the keys in the original order. 
+// This can be used for sanitizing properties files. +// +// Property expansion is recursive and circular references +// and malformed expressions are not allowed and cause an +// error. Expansion of environment variables is supported. +// +// # standard property +// key = value +// +// # property expansion: key2 = value +// key2 = ${key} +// +// # recursive expansion: key3 = value +// key3 = ${key2} +// +// # circular reference (error) +// key = ${key} +// +// # malformed expression (error) +// key = ${ke +// +// # refers to the user's home dir +// home = ${HOME} +// +// # local key takes precedence over env var: u = foo +// USER = foo +// u = ${USER} +// +// The default property expansion format is ${key} but can be +// changed by setting different pre- and postfix values on the +// Properties object. +// +// p := properties.NewProperties() +// p.Prefix = "#[" +// p.Postfix = "]#" +// +// Properties provides convenience functions for getting typed +// values with default values if the key does not exist or the +// type conversion failed. +// +// # Returns true if the value is either "1", "on", "yes" or "true" +// # Returns false for every other value and the default value if +// # the key does not exist. +// v = p.GetBool("key", false) +// +// # Returns the value if the key exists and the format conversion +// # was successful. Otherwise, the default value is returned. +// v = p.GetInt64("key", 999) +// v = p.GetUint64("key", 999) +// v = p.GetFloat64("key", 123.0) +// v = p.GetString("key", "def") +// v = p.GetDuration("key", 999) +// +// As an alternative, properties may be applied with the standard +// library's flag implementation at any time. +// +// # Standard configuration +// v = flag.Int("key", 999, "help message") +// flag.Parse() +// +// # Merge p into the flag set +// p.MustFlag(flag.CommandLine) +// +// Properties provides several MustXXX() convenience functions +// which will terminate the app if an error occurs. The behavior +// of the failure is configurable and the default is to call +// log.Fatal(err). To have the MustXXX() functions panic instead +// of logging the error, set a different ErrorHandler before +// you use the Properties package. +// +// properties.ErrorHandler = properties.PanicHandler +// +// # Will panic instead of logging an error +// p := properties.MustLoadFile("config.properties") +// +// You can also provide your own ErrorHandler function. The only requirement +// is that the error handler function must exit after handling the error. +// +// properties.ErrorHandler = func(err error) { +// fmt.Println(err) +// os.Exit(1) +// } +// +// # Will write to stdout and then exit +// p := properties.MustLoadFile("config.properties") +// +// Properties can also be loaded into a struct via the `Decode` +// method, e.g. +// +// type S struct { +// A string `properties:"a,default=foo"` +// D time.Duration `properties:"timeout,default=5s"` +// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` +// } +// +// See `Decode()` method for the full documentation. +// +// The following documents provide a description of the properties +// file format.
+// +// http://en.wikipedia.org/wiki/.properties +// +// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 +// +package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go new file mode 100644 index 00000000..0d775e03 --- /dev/null +++ b/vendor/github.com/magiconair/properties/integrate.go @@ -0,0 +1,34 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import "flag" + +// MustFlag sets flags that are skipped by dst.Parse when p contains +// the respective key for flag.Flag.Name. +// +// It's use is recommended with command line arguments as in: +// flag.Parse() +// p.MustFlag(flag.CommandLine) +func (p *Properties) MustFlag(dst *flag.FlagSet) { + m := make(map[string]*flag.Flag) + dst.VisitAll(func(f *flag.Flag) { + m[f.Name] = f + }) + dst.Visit(func(f *flag.Flag) { + delete(m, f.Name) // overridden + }) + + for name, f := range m { + v, ok := p.Get(name) + if !ok { + continue + } + + if err := f.Value.Set(v); err != nil { + ErrorHandler(err) + } + } +} diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go new file mode 100644 index 00000000..c63fcc60 --- /dev/null +++ b/vendor/github.com/magiconair/properties/lex.go @@ -0,0 +1,407 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Parts of the lexer are from the template/text/parser package +// For these parts the following applies: +// +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file of the go 1.2 +// distribution. + +package properties + +import ( + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +// item represents a token or text string returned from the scanner. +type item struct { + typ itemType // The type of this item. + pos int // The starting position, in bytes, of this item in the input string. + val string // The value of this item. +} + +func (i item) String() string { + switch { + case i.typ == itemEOF: + return "EOF" + case i.typ == itemError: + return i.val + case len(i.val) > 10: + return fmt.Sprintf("%.10q...", i.val) + } + return fmt.Sprintf("%q", i.val) +} + +// itemType identifies the type of lex items. +type itemType int + +const ( + itemError itemType = iota // error occurred; value is text of error + itemEOF + itemKey // a key + itemValue // a value + itemComment // a comment +) + +// defines a constant for EOF +const eof = -1 + +// permitted whitespace characters space, FF and TAB +const whitespace = " \f\t" + +// stateFn represents the state of the scanner as a function that returns the next state. +type stateFn func(*lexer) stateFn + +// lexer holds the state of the scanner. +type lexer struct { + input string // the string being scanned + state stateFn // the next lexing function to enter + pos int // current position in the input + start int // start position of this item + width int // width of last rune read from input + lastPos int // position of most recent item returned by nextItem + runes []rune // scanned runes for this item + items chan item // channel of scanned items +} + +// next returns the next rune in the input. 
+func (l *lexer) next() rune { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = w + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + i := item{t, l.start, string(l.runes)} + l.items <- i + l.start = l.pos + l.runes = l.runes[:0] +} + +// ignore skips over the pending input before this point. +func (l *lexer) ignore() { + l.start = l.pos +} + +// appends the rune to the current value +func (l *lexer) appendRune(r rune) { + l.runes = append(l.runes, r) +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.ContainsRune(valid, l.next()) { + } + l.backup() +} + +// acceptRunUntil consumes a run of runes up to a terminator. +func (l *lexer) acceptRunUntil(term rune) { + for term != l.next() { + } + l.backup() +} + +// hasText returns true if the current parsed text is not empty. +func (l *lexer) isNotEmpty() bool { + return l.pos > l.start +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + i := <-l.items + l.lastPos = i.pos + return i +} + +// lex creates a new scanner for the input string. +func lex(input string) *lexer { + l := &lexer{ + input: input, + items: make(chan item), + runes: make([]rune, 0, 32), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexBeforeKey(l); l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +// lexBeforeKey scans until a key begins. +func lexBeforeKey(l *lexer) stateFn { + switch r := l.next(); { + case isEOF(r): + l.emit(itemEOF) + return nil + + case isEOL(r): + l.ignore() + return lexBeforeKey + + case isComment(r): + return lexComment + + case isWhitespace(r): + l.ignore() + return lexBeforeKey + + default: + l.backup() + return lexKey + } +} + +// lexComment scans a comment line. The comment character has already been scanned. 
+func lexComment(l *lexer) stateFn { + l.acceptRun(whitespace) + l.ignore() + for { + switch r := l.next(); { + case isEOF(r): + l.ignore() + l.emit(itemEOF) + return nil + case isEOL(r): + l.emit(itemComment) + return lexBeforeKey + default: + l.appendRune(r) + } + } +} + +// lexKey scans the key up to a delimiter +func lexKey(l *lexer) stateFn { + var r rune + +Loop: + for { + switch r = l.next(); { + + case isEscape(r): + err := l.scanEscapeSequence() + if err != nil { + return l.errorf(err.Error()) + } + + case isEndOfKey(r): + l.backup() + break Loop + + case isEOF(r): + break Loop + + default: + l.appendRune(r) + } + } + + if len(l.runes) > 0 { + l.emit(itemKey) + } + + if isEOF(r) { + l.emit(itemEOF) + return nil + } + + return lexBeforeValue +} + +// lexBeforeValue scans the delimiter between key and value. +// Leading and trailing whitespace is ignored. +// We expect to be just after the key. +func lexBeforeValue(l *lexer) stateFn { + l.acceptRun(whitespace) + l.accept(":=") + l.acceptRun(whitespace) + l.ignore() + return lexValue +} + +// lexValue scans text until the end of the line. We expect to be just after the delimiter. +func lexValue(l *lexer) stateFn { + for { + switch r := l.next(); { + case isEscape(r): + if isEOL(l.peek()) { + l.next() + l.acceptRun(whitespace) + } else { + err := l.scanEscapeSequence() + if err != nil { + return l.errorf(err.Error()) + } + } + + case isEOL(r): + l.emit(itemValue) + l.ignore() + return lexBeforeKey + + case isEOF(r): + l.emit(itemValue) + l.emit(itemEOF) + return nil + + default: + l.appendRune(r) + } + } +} + +// scanEscapeSequence scans either one of the escaped characters +// or a unicode literal. We expect to be after the escape character. +func (l *lexer) scanEscapeSequence() error { + switch r := l.next(); { + + case isEscapedCharacter(r): + l.appendRune(decodeEscapedCharacter(r)) + return nil + + case atUnicodeLiteral(r): + return l.scanUnicodeLiteral() + + case isEOF(r): + return fmt.Errorf("premature EOF") + + // silently drop the escape character and append the rune as is + default: + l.appendRune(r) + return nil + } +} + +// scans a unicode literal in the form \uXXXX. We expect to be after the \u. +func (l *lexer) scanUnicodeLiteral() error { + // scan the digits + d := make([]rune, 4) + for i := 0; i < 4; i++ { + d[i] = l.next() + if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { + return fmt.Errorf("invalid unicode literal") + } + } + + // decode the digits into a rune + r, err := strconv.ParseInt(string(d), 16, 0) + if err != nil { + return err + } + + l.appendRune(rune(r)) + return nil +} + +// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. +func decodeEscapedCharacter(r rune) rune { + switch r { + case 'f': + return '\f' + case 'n': + return '\n' + case 'r': + return '\r' + case 't': + return '\t' + default: + return r + } +} + +// atUnicodeLiteral reports whether we are at a unicode literal. +// The escape character has already been consumed. +func atUnicodeLiteral(r rune) bool { + return r == 'u' +} + +// isComment reports whether we are at the start of a comment. +func isComment(r rune) bool { + return r == '#' || r == '!' +} + +// isEndOfKey reports whether the rune terminates the current key. +func isEndOfKey(r rune) bool { + return strings.ContainsRune(" \f\t\r\n:=", r) +} + +// isEOF reports whether we are at EOF. +func isEOF(r rune) bool { + return r == eof +} + +// isEOL reports whether we are at a new line character. 
+func isEOL(r rune) bool { + return r == '\n' || r == '\r' +} + +// isEscape reports whether the rune is the escape character which +// prefixes unicode literals and other escaped characters. +func isEscape(r rune) bool { + return r == '\\' +} + +// isEscapedCharacter reports whether we are at one of the characters that need escaping. +// The escape character has already been consumed. +func isEscapedCharacter(r rune) bool { + return strings.ContainsRune(" :=fnrt", r) +} + +// isWhitespace reports whether the rune is a whitespace character. +func isWhitespace(r rune) bool { + return strings.ContainsRune(whitespace, r) +} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go new file mode 100644 index 00000000..9c83fd63 --- /dev/null +++ b/vendor/github.com/magiconair/properties/load.go @@ -0,0 +1,241 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" +) + +// Encoding specifies encoding of the input data. +type Encoding uint + +const ( + // UTF8 interprets the input data as UTF-8. + UTF8 Encoding = 1 << iota + + // ISO_8859_1 interprets the input data as ISO-8859-1. + ISO_8859_1 +) + +// Load reads a buffer into a Properties struct. +func Load(buf []byte, enc Encoding) (*Properties, error) { + return loadBuf(buf, enc) +} + +// LoadString reads an UTF8 string into a properties struct. +func LoadString(s string) (*Properties, error) { + return loadBuf([]byte(s), UTF8) +} + +// LoadMap creates a new Properties struct from a string map. +func LoadMap(m map[string]string) *Properties { + p := NewProperties() + for k, v := range m { + p.Set(k, v) + } + return p +} + +// LoadFile reads a file into a Properties struct. +func LoadFile(filename string, enc Encoding) (*Properties, error) { + return loadAll([]string{filename}, enc, false) +} + +// LoadFiles reads multiple files in the given order into +// a Properties struct. If 'ignoreMissing' is true then +// non-existent files will not be reported as error. +func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + return loadAll(filenames, enc, ignoreMissing) +} + +// LoadURL reads the content of the URL into a Properties struct. +// +// The encoding is determined via the Content-Type header which +// should be set to 'text/plain'. If the 'charset' parameter is +// missing, 'iso-8859-1' or 'latin1' the encoding is set to +// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the +// encoding is set to UTF-8. A missing content type header is +// interpreted as 'text/plain; charset=utf-8'. +func LoadURL(url string) (*Properties, error) { + return loadAll([]string{url}, UTF8, false) +} + +// LoadURLs reads the content of multiple URLs in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code will +// not be reported as error. See LoadURL for the Content-Type header +// and the encoding. +func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { + return loadAll(urls, UTF8, ignoreMissing) +} + +// LoadAll reads the content of multiple URLs or files in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will +// not be reported as error. Encoding sets the encoding for files. 
For the URLs please see +// LoadURL for the Content-Type header and the encoding. +func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + return loadAll(names, enc, ignoreMissing) +} + +// MustLoadString reads an UTF8 string into a Properties struct and +// panics on error. +func MustLoadString(s string) *Properties { + return must(LoadString(s)) +} + +// MustLoadFile reads a file into a Properties struct and +// panics on error. +func MustLoadFile(filename string, enc Encoding) *Properties { + return must(LoadFile(filename, enc)) +} + +// MustLoadFiles reads multiple files in the given order into +// a Properties struct and panics on error. If 'ignoreMissing' +// is true then non-existent files will not be reported as error. +func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { + return must(LoadFiles(filenames, enc, ignoreMissing)) +} + +// MustLoadURL reads the content of a URL into a Properties struct and +// panics on error. +func MustLoadURL(url string) *Properties { + return must(LoadURL(url)) +} + +// MustLoadURLs reads the content of multiple URLs in the given order into a +// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 +// status code will not be reported as error. +func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { + return must(LoadURLs(urls, ignoreMissing)) +} + +// MustLoadAll reads the content of multiple URLs or files in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will +// not be reported as error. Encoding sets the encoding for files. For the URLs please see +// LoadURL for the Content-Type header and the encoding. It panics on error. +func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { + return must(LoadAll(names, enc, ignoreMissing)) +} + +func loadBuf(buf []byte, enc Encoding) (*Properties, error) { + p, err := parse(convert(buf, enc)) + if err != nil { + return nil, err + } + return p, p.check() +} + +func loadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + result := NewProperties() + for _, name := range names { + n, err := expandName(name) + if err != nil { + return nil, err + } + var p *Properties + if strings.HasPrefix(n, "http://") || strings.HasPrefix(n, "https://") { + p, err = loadURL(n, ignoreMissing) + } else { + p, err = loadFile(n, enc, ignoreMissing) + } + if err != nil { + return nil, err + } + result.Merge(p) + + } + return result, result.check() +} + +func loadFile(filename string, enc Encoding, ignoreMissing bool) (*Properties, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if ignoreMissing && os.IsNotExist(err) { + LogPrintf("properties: %s not found. skipping", filename) + return NewProperties(), nil + } + return nil, err + } + p, err := parse(convert(data, enc)) + if err != nil { + return nil, err + } + return p, nil +} + +func loadURL(url string, ignoreMissing bool) (*Properties, error) { + resp, err := http.Get(url) + if err != nil { + return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) + } + if resp.StatusCode == 404 && ignoreMissing { + LogPrintf("properties: %s returned %d. 
skipping", url, resp.StatusCode) + return NewProperties(), nil + } + if resp.StatusCode != 200 { + return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) + } + if err = resp.Body.Close(); err != nil { + return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) + } + + ct := resp.Header.Get("Content-Type") + var enc Encoding + switch strings.ToLower(ct) { + case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1": + enc = ISO_8859_1 + case "", "text/plain; charset=utf-8": + enc = UTF8 + default: + return nil, fmt.Errorf("properties: invalid content type %s", ct) + } + + p, err := parse(convert(body, enc)) + if err != nil { + return nil, err + } + return p, nil +} + +func must(p *Properties, err error) *Properties { + if err != nil { + ErrorHandler(err) + } + return p +} + +// expandName expands ${ENV_VAR} expressions in a name. +// If the environment variable does not exist then it will be replaced +// with an empty string. Malformed expressions like "${ENV_VAR" will +// be reported as error. +func expandName(name string) (string, error) { + return expand(name, []string{}, "${", "}", make(map[string]string)) +} + +// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. +// For ISO-8859-1 we can convert each byte straight into a rune since the +// first 256 unicode code points cover ISO-8859-1. +func convert(buf []byte, enc Encoding) string { + switch enc { + case UTF8: + return string(buf) + case ISO_8859_1: + runes := make([]rune, len(buf)) + for i, b := range buf { + runes[i] = rune(b) + } + return string(runes) + default: + ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) + } + panic("ErrorHandler should exit") +} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go new file mode 100644 index 00000000..90f555cb --- /dev/null +++ b/vendor/github.com/magiconair/properties/parser.go @@ -0,0 +1,95 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package properties + +import ( + "fmt" + "runtime" +) + +type parser struct { + lex *lexer +} + +func parse(input string) (properties *Properties, err error) { + p := &parser{lex: lex(input)} + defer p.recover(&err) + + properties = NewProperties() + key := "" + comments := []string{} + + for { + token := p.expectOneOf(itemComment, itemKey, itemEOF) + switch token.typ { + case itemEOF: + goto done + case itemComment: + comments = append(comments, token.val) + continue + case itemKey: + key = token.val + if _, ok := properties.m[key]; !ok { + properties.k = append(properties.k, key) + } + } + + token = p.expectOneOf(itemValue, itemEOF) + if len(comments) > 0 { + properties.c[key] = comments + comments = []string{} + } + switch token.typ { + case itemEOF: + properties.m[key] = "" + goto done + case itemValue: + properties.m[key] = token.val + } + } + +done: + return properties, nil +} + +func (p *parser) errorf(format string, args ...interface{}) { + format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +func (p *parser) expect(expected itemType) (token item) { + token = p.lex.nextItem() + if token.typ != expected { + p.unexpected(token) + } + return token +} + +func (p *parser) expectOneOf(expected ...itemType) (token item) { + token = p.lex.nextItem() + for _, v := range expected { + if token.typ == v { + return token + } + } + p.unexpected(token) + panic("unexpected token") +} + +func (p *parser) unexpected(token item) { + p.errorf(token.String()) +} + +// recover is the handler that turns panics into returns from the top level of Parse. +func (p *parser) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + *errp = e.(error) + } + return +} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go new file mode 100644 index 00000000..53f5b2ff --- /dev/null +++ b/vendor/github.com/magiconair/properties/properties.go @@ -0,0 +1,822 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. +// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. + +import ( + "fmt" + "io" + "log" + "os" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +const maxExpansionDepth = 64 + +// ErrorHandlerFunc defines the type of function which handles failures +// of the MustXXX() functions. An error handler function must exit +// the application after handling the error. +type ErrorHandlerFunc func(error) + +// ErrorHandler is the function which handles failures of the MustXXX() +// functions. The default is LogFatalHandler. +var ErrorHandler ErrorHandlerFunc = LogFatalHandler + +// LogHandlerFunc defines the function prototype for logging errors. +type LogHandlerFunc func(fmt string, args ...interface{}) + +// LogPrintf defines a log handler which uses log.Printf. +var LogPrintf LogHandlerFunc = log.Printf + +// LogFatalHandler handles the error by logging a fatal error and exiting. +func LogFatalHandler(err error) { + log.Fatal(err) +} + +// PanicHandler handles the error by panicking. 
+func PanicHandler(err error) { + panic(err) +} + +// ----------------------------------------------------------------------------- + +// A Properties contains the key/value pairs from the properties input. +// All values are stored in unexpanded form and are expanded at runtime +type Properties struct { + // Pre-/Postfix for property expansion. + Prefix string + Postfix string + + // DisableExpansion controls the expansion of properties on Get() + // and the check for circular references on Set(). When set to + // true Properties behaves like a simple key/value store and does + // not check for circular references on Get() or on Set(). + DisableExpansion bool + + // Stores the key/value pairs + m map[string]string + + // Stores the comments per key. + c map[string][]string + + // Stores the keys in order of appearance. + k []string +} + +// NewProperties creates a new Properties struct with the default +// configuration for "${key}" expressions. +func NewProperties() *Properties { + return &Properties{ + Prefix: "${", + Postfix: "}", + m: map[string]string{}, + c: map[string][]string{}, + k: []string{}, + } +} + +// Get returns the expanded value for the given key if exists. +// Otherwise, ok is false. +func (p *Properties) Get(key string) (value string, ok bool) { + v, ok := p.m[key] + if p.DisableExpansion { + return v, ok + } + if !ok { + return "", false + } + + expanded, err := p.expand(key, v) + + // we guarantee that the expanded value is free of + // circular references and malformed expressions + // so we panic if we still get an error here. + if err != nil { + ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v)) + } + + return expanded, true +} + +// MustGet returns the expanded value for the given key if exists. +// Otherwise, it panics. +func (p *Properties) MustGet(key string) string { + if v, ok := p.Get(key); ok { + return v + } + ErrorHandler(invalidKeyError(key)) + panic("ErrorHandler should exit") +} + +// ---------------------------------------------------------------------------- + +// ClearComments removes the comments for all keys. +func (p *Properties) ClearComments() { + p.c = map[string][]string{} +} + +// ---------------------------------------------------------------------------- + +// GetComment returns the last comment before the given key or an empty string. +func (p *Properties) GetComment(key string) string { + comments, ok := p.c[key] + if !ok || len(comments) == 0 { + return "" + } + return comments[len(comments)-1] +} + +// ---------------------------------------------------------------------------- + +// GetComments returns all comments that appeared before the given key or nil. +func (p *Properties) GetComments(key string) []string { + if comments, ok := p.c[key]; ok { + return comments + } + return nil +} + +// ---------------------------------------------------------------------------- + +// SetComment sets the comment for the key. +func (p *Properties) SetComment(key, comment string) { + p.c[key] = []string{comment} +} + +// ---------------------------------------------------------------------------- + +// SetComments sets the comments for the key. If the comments are nil then +// all comments for this key are deleted. +func (p *Properties) SetComments(key string, comments []string) { + if comments == nil { + delete(p.c, key) + return + } + p.c[key] = comments +} + +// ---------------------------------------------------------------------------- + +// GetBool checks if the expanded value is one of '1', 'yes', +// 'true' or 'on' if the key exists. 
The comparison is case-insensitive. +// If the key does not exist the default value is returned. +func (p *Properties) GetBool(key string, def bool) bool { + v, err := p.getBool(key) + if err != nil { + return def + } + return v +} + +// MustGetBool checks if the expanded value is one of '1', 'yes', +// 'true' or 'on' if the key exists. The comparison is case-insensitive. +// If the key does not exist the function panics. +func (p *Properties) MustGetBool(key string) bool { + v, err := p.getBool(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getBool(key string) (value bool, err error) { + if v, ok := p.Get(key); ok { + return boolVal(v), nil + } + return false, invalidKeyError(key) +} + +func boolVal(v string) bool { + v = strings.ToLower(v) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + +// ---------------------------------------------------------------------------- + +// GetDuration parses the expanded value as an time.Duration (in ns) if the +// key exists. If key does not exist or the value cannot be parsed the default +// value is returned. In almost all cases you want to use GetParsedDuration(). +func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { + v, err := p.getInt64(key) + if err != nil { + return def + } + return time.Duration(v) +} + +// MustGetDuration parses the expanded value as an time.Duration (in ns) if +// the key exists. If key does not exist or the value cannot be parsed the +// function panics. In almost all cases you want to use MustGetParsedDuration(). +func (p *Properties) MustGetDuration(key string) time.Duration { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return time.Duration(v) +} + +// ---------------------------------------------------------------------------- + +// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { + s, ok := p.Get(key) + if !ok { + return def + } + v, err := time.ParseDuration(s) + if err != nil { + return def + } + return v +} + +// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetParsedDuration(key string) time.Duration { + s, ok := p.Get(key) + if !ok { + ErrorHandler(invalidKeyError(key)) + } + v, err := time.ParseDuration(s) + if err != nil { + ErrorHandler(err) + } + return v +} + +// ---------------------------------------------------------------------------- + +// GetFloat64 parses the expanded value as a float64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetFloat64(key string, def float64) float64 { + v, err := p.getFloat64(key) + if err != nil { + return def + } + return v +} + +// MustGetFloat64 parses the expanded value as a float64 if the key exists. +// If key does not exist or the value cannot be parsed the function panics. 
+func (p *Properties) MustGetFloat64(key string) float64 { + v, err := p.getFloat64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getFloat64(key string) (value float64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseFloat(v, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetInt parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. If the value does not fit into an int the +// function panics with an out of range error. +func (p *Properties) GetInt(key string, def int) int { + v, err := p.getInt64(key) + if err != nil { + return def + } + return intRangeCheck(key, v) +} + +// MustGetInt parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +// If the value does not fit into an int the function panics with +// an out of range error. +func (p *Properties) MustGetInt(key string) int { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return intRangeCheck(key, v) +} + +// ---------------------------------------------------------------------------- + +// GetInt64 parses the expanded value as an int64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetInt64(key string, def int64) int64 { + v, err := p.getInt64(key) + if err != nil { + return def + } + return v +} + +// MustGetInt64 parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetInt64(key string) int64 { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getInt64(key string) (value int64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetUint parses the expanded value as an uint if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. If the value does not fit into an int the +// function panics with an out of range error. +func (p *Properties) GetUint(key string, def uint) uint { + v, err := p.getUint64(key) + if err != nil { + return def + } + return uintRangeCheck(key, v) +} + +// MustGetUint parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +// If the value does not fit into an int the function panics with +// an out of range error. +func (p *Properties) MustGetUint(key string) uint { + v, err := p.getUint64(key) + if err != nil { + ErrorHandler(err) + } + return uintRangeCheck(key, v) +} + +// ---------------------------------------------------------------------------- + +// GetUint64 parses the expanded value as an uint64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetUint64(key string, def uint64) uint64 { + v, err := p.getUint64(key) + if err != nil { + return def + } + return v +} + +// MustGetUint64 parses the expanded value as an int if the key exists. 
+// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetUint64(key string) uint64 { + v, err := p.getUint64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getUint64(key string) (value uint64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetString returns the expanded value for the given key if exists or +// the default value otherwise. +func (p *Properties) GetString(key, def string) string { + if v, ok := p.Get(key); ok { + return v + } + return def +} + +// MustGetString returns the expanded value for the given key if exists or +// panics otherwise. +func (p *Properties) MustGetString(key string) string { + if v, ok := p.Get(key); ok { + return v + } + ErrorHandler(invalidKeyError(key)) + panic("ErrorHandler should exit") +} + +// ---------------------------------------------------------------------------- + +// Filter returns a new properties object which contains all properties +// for which the key matches the pattern. +func (p *Properties) Filter(pattern string) (*Properties, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + + return p.FilterRegexp(re), nil +} + +// FilterRegexp returns a new properties object which contains all properties +// for which the key matches the regular expression. +func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { + pp := NewProperties() + for _, k := range p.k { + if re.MatchString(k) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) + pp.Set(k, p.m[k]) + } + } + return pp +} + +// FilterPrefix returns a new properties object with a subset of all keys +// with the given prefix. +func (p *Properties) FilterPrefix(prefix string) *Properties { + pp := NewProperties() + for _, k := range p.k { + if strings.HasPrefix(k, prefix) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) + pp.Set(k, p.m[k]) + } + } + return pp +} + +// FilterStripPrefix returns a new properties object with a subset of all keys +// with the given prefix and the prefix removed from the keys. +func (p *Properties) FilterStripPrefix(prefix string) *Properties { + pp := NewProperties() + n := len(prefix) + for _, k := range p.k { + if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference + // TODO(fs): this function should probably return an error but the signature is fixed + pp.Set(k[n:], p.m[k]) + } + } + return pp +} + +// Len returns the number of keys. +func (p *Properties) Len() int { + return len(p.m) +} + +// Keys returns all keys in the same order as in the input. +func (p *Properties) Keys() []string { + keys := make([]string, len(p.k)) + copy(keys, p.k) + return keys +} + +// Set sets the property key to the corresponding value. +// If a value for key existed before then ok is true and prev +// contains the previous value. 
If the value contains a +// circular reference or a malformed expression then +// an error is returned. +// An empty key is silently ignored. +func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { + if key == "" { + return "", false, nil + } + + // if expansion is disabled we allow circular references + if p.DisableExpansion { + prev, ok = p.Get(key) + p.m[key] = value + if !ok { + p.k = append(p.k, key) + } + return prev, ok, nil + } + + // to check for a circular reference we temporarily need + // to set the new value. If there is an error then revert + // to the previous state. Only if all tests are successful + // then we add the key to the p.k list. + prev, ok = p.Get(key) + p.m[key] = value + + // now check for a circular reference + _, err = p.expand(key, value) + if err != nil { + + // revert to the previous state + if ok { + p.m[key] = prev + } else { + delete(p.m, key) + } + + return "", false, err + } + + if !ok { + p.k = append(p.k, key) + } + + return prev, ok, nil +} + +// SetValue sets property key to the default string value +// as defined by fmt.Sprintf("%v"). +func (p *Properties) SetValue(key string, value interface{}) error { + _, _, err := p.Set(key, fmt.Sprintf("%v", value)) + return err +} + +// MustSet sets the property key to the corresponding value. +// If a value for key existed before then ok is true and prev +// contains the previous value. An empty key is silently ignored. +func (p *Properties) MustSet(key, value string) (prev string, ok bool) { + prev, ok, err := p.Set(key, value) + if err != nil { + ErrorHandler(err) + } + return prev, ok +} + +// String returns a string of all expanded 'key = value' pairs. +func (p *Properties) String() string { + var s string + for _, key := range p.k { + value, _ := p.Get(key) + s = fmt.Sprintf("%s%s = %s\n", s, key, value) + } + return s +} + +// Write writes all unexpanded 'key = value' pairs to the given writer. +// Write returns the number of bytes written and any write error encountered. +func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { + return p.WriteComment(w, "", enc) +} + +// WriteComment writes all unexpanced 'key = value' pairs to the given writer. +// If prefix is not empty then comments are written with a blank line and the +// given prefix. The prefix should be either "# " or "! " to be compatible with +// the properties file format. Otherwise, the properties parser will not be +// able to read the file back in. It returns the number of bytes written and +// any write error encountered. +func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) { + var x int + + for _, key := range p.k { + value := p.m[key] + + if prefix != "" { + if comments, ok := p.c[key]; ok { + // don't print comments if they are all empty + allEmpty := true + for _, c := range comments { + if c != "" { + allEmpty = false + break + } + } + + if !allEmpty { + // add a blank line between entries but not at the top + if len(comments) > 0 && n > 0 { + x, err = fmt.Fprintln(w) + if err != nil { + return + } + n += x + } + + for _, c := range comments { + x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc)) + if err != nil { + return + } + n += x + } + } + } + } + + x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc)) + if err != nil { + return + } + n += x + } + return +} + +// Map returns a copy of the properties as a map. 
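// Illustrative sketch (editorial, not part of the vendored file): writing the
// unexpanded key/value pairs back out, including per-key comments, with the
// Write/WriteComment methods above. The "# " prefix keeps the output readable by
// the properties parser, as the doc comment notes. Content is hypothetical.
package main

import (
	"bytes"
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.Set("host", "example.com")
	p.SetComment("host", "public hostname")

	var buf bytes.Buffer
	if _, err := p.WriteComment(&buf, "# ", properties.UTF8); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// # public hostname
	// host = example.com
}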
+func (p *Properties) Map() map[string]string { + m := make(map[string]string) + for k, v := range p.m { + m[k] = v + } + return m +} + +// FilterFunc returns a copy of the properties which includes the values which passed all filters. +func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { + pp := NewProperties() +outer: + for k, v := range p.m { + for _, f := range filters { + if !f(k, v) { + continue outer + } + pp.Set(k, v) + } + } + return pp +} + +// ---------------------------------------------------------------------------- + +// Delete removes the key and its comments. +func (p *Properties) Delete(key string) { + delete(p.m, key) + delete(p.c, key) + newKeys := []string{} + for _, k := range p.k { + if k != key { + newKeys = append(newKeys, k) + } + } + p.k = newKeys +} + +// Merge merges properties, comments and keys from other *Properties into p +func (p *Properties) Merge(other *Properties) { + for k, v := range other.m { + p.m[k] = v + } + for k, v := range other.c { + p.c[k] = v + } + +outer: + for _, otherKey := range other.k { + for _, key := range p.k { + if otherKey == key { + continue outer + } + } + p.k = append(p.k, otherKey) + } +} + +// ---------------------------------------------------------------------------- + +// check expands all values and returns an error if a circular reference or +// a malformed expression was found. +func (p *Properties) check() error { + for key, value := range p.m { + if _, err := p.expand(key, value); err != nil { + return err + } + } + return nil +} + +func (p *Properties) expand(key, input string) (string, error) { + // no pre/postfix -> nothing to expand + if p.Prefix == "" && p.Postfix == "" { + return input, nil + } + + return expand(input, []string{key}, p.Prefix, p.Postfix, p.m) +} + +// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. +// The function keeps track of the keys that were already expanded and stops if it +// detects a circular reference or a malformed expression of the form '(prefix)key'. +func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { + if len(keys) > maxExpansionDepth { + return "", fmt.Errorf("expansion too deep") + } + + for { + start := strings.Index(s, prefix) + if start == -1 { + return s, nil + } + + keyStart := start + len(prefix) + keyLen := strings.Index(s[keyStart:], postfix) + if keyLen == -1 { + return "", fmt.Errorf("malformed expression") + } + + end := keyStart + keyLen + len(postfix) - 1 + key := s[keyStart : keyStart+keyLen] + + // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) + + for _, k := range keys { + if key == k { + return "", fmt.Errorf("circular reference") + } + } + + val, ok := values[key] + if !ok { + val = os.Getenv(key) + } + new_val, err := expand(val, append(keys, key), prefix, postfix, values) + if err != nil { + return "", err + } + s = s[:start] + new_val + s[end+1:] + } + return s, nil +} + +// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. 
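// Illustrative sketch (editorial, not part of the vendored file): the expansion logic
// above rejects circular "${key}" references when a value is set. Key names are
// hypothetical.
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.Set("a", "${b}")

	// "b" would refer back to "a", so Set reports a circular reference and
	// leaves the store unchanged.
	if _, _, err := p.Set("b", "${a}"); err != nil {
		fmt.Println("rejected:", err)
	}
}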
+func encode(s string, special string, enc Encoding) string { + switch enc { + case UTF8: + return encodeUtf8(s, special) + case ISO_8859_1: + return encodeIso(s, special) + default: + panic(fmt.Sprintf("unsupported encoding %v", enc)) + } +} + +func encodeUtf8(s string, special string) string { + v := "" + for pos := 0; pos < len(s); { + r, w := utf8.DecodeRuneInString(s[pos:]) + pos += w + v += escape(r, special) + } + return v +} + +func encodeIso(s string, special string) string { + var r rune + var w int + var v string + for pos := 0; pos < len(s); { + switch r, w = utf8.DecodeRuneInString(s[pos:]); { + case r < 1<<8: // single byte rune -> escape special chars only + v += escape(r, special) + case r < 1<<16: // two byte rune -> unicode literal + v += fmt.Sprintf("\\u%04x", r) + default: // more than two bytes per rune -> can't encode + v += "?" + } + pos += w + } + return v +} + +func escape(r rune, special string) string { + switch r { + case '\f': + return "\\f" + case '\n': + return "\\n" + case '\r': + return "\\r" + case '\t': + return "\\t" + default: + if strings.ContainsRune(special, r) { + return "\\" + string(r) + } + return string(r) + } +} + +func invalidKeyError(key string) error { + return fmt.Errorf("unknown property: %s", key) +} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go new file mode 100644 index 00000000..2e907d54 --- /dev/null +++ b/vendor/github.com/magiconair/properties/rangecheck.go @@ -0,0 +1,31 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "math" +) + +// make this a var to overwrite it in a test +var is32Bit = ^uint(0) == math.MaxUint32 + +// intRangeCheck checks if the value fits into the int type and +// panics if it does not. +func intRangeCheck(key string, v int64) int { + if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { + panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) + } + return int(v) +} + +// uintRangeCheck checks if the value fits into the uint type and +// panics if it does not. +func uintRangeCheck(key string, v uint64) uint { + if is32Bit && v > math.MaxUint32 { + panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) + } + return uint(v) +} diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 00000000..2a727575 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,171 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
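// Illustrative sketch (editorial, not part of the vendored file): wiring the hook
// helpers above into a decoder via the DecoderConfig/NewDecoder API that appears
// later in this package. The Config struct and the map contents are hypothetical.
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Tags    []string      `mapstructure:"tags"`
	Timeout time.Duration `mapstructure:"timeout"`
}

func main() {
	input := map[string]interface{}{
		"tags":    "a,b,c",
		"timeout": "1500ms",
	}

	var out Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &out,
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToSliceHookFunc(","),
			mapstructure.StringToTimeDurationHookFunc(),
		),
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Tags:[a b c] Timeout:1.5s}
}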
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 00000000..47a99e5a --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 00000000..aaf12a29 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,1043 @@ +// Package mapstructure exposes functionality to convert an arbitrary +// map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. 
and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. 
This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. 
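// Illustrative sketch (editorial, not part of the vendored file): the basic top-level
// Decode entry point above, translating a map into a tagged struct. The struct and
// map contents are hypothetical.
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string `mapstructure:"name"`
	Age  int    `mapstructure:"age"`
}

func main() {
	input := map[string]interface{}{
		"name": "alice",
		"age":  30,
		"job":  "engineer", // no matching field: ignored unless ErrorUnused is set
	}

	var p Person
	if err := mapstructure.Decode(input, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Name:alice Age:30}
}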
+func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + inputVal := reflect.ValueOf(input) + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. + var err error + input, err = DecodeHookExec( + d.config.DecodeHook, + inputVal.Type(), outVal.Type(), input) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + inputKind := getKind(outVal) + switch inputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, inputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. 
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + return d.decode(name, data, val.Elem()) + } + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + 
val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { 
+ case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + // Determine the name of the key in the map + keyName := f.Name + tagValue := f.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + if tagValue == "-" { + continue + } + + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + err := d.decode(keyName, x.Interface(), vMap) + if err != nil { + return err + } + + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. 
Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + + // Make a new slice to hold our result, same size as the original data. 
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. 
+ if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, structVal.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. 
+ if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE new file mode 100644 index 00000000..583bdae6 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/pelletier/go-toml/cmd/test_program.go b/vendor/github.com/pelletier/go-toml/cmd/test_program.go new file mode 100644 index 00000000..73077f61 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/cmd/test_program.go @@ -0,0 +1,91 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + "time" + + "github.com/pelletier/go-toml" +) + +func main() { + bytes, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Fatalf("Error during TOML read: %s", err) + os.Exit(2) + } + tree, err := toml.Load(string(bytes)) + if err != nil { + log.Fatalf("Error during TOML load: %s", err) + os.Exit(1) + } + + typedTree := translate(*tree) + + if err := json.NewEncoder(os.Stdout).Encode(typedTree); err != nil { + log.Fatalf("Error encoding JSON: %s", err) + os.Exit(3) + } + + os.Exit(0) +} + +func translate(tomlData interface{}) interface{} { + switch orig := tomlData.(type) { + case map[string]interface{}: + typed := make(map[string]interface{}, len(orig)) + for k, v := range orig { + typed[k] = translate(v) + } + return typed + case *toml.Tree: + return translate(*orig) + case toml.Tree: + keys := orig.Keys() + typed := make(map[string]interface{}, len(keys)) + for _, k := range keys { + typed[k] = translate(orig.GetPath([]string{k})) + } + return typed + case []*toml.Tree: + typed := make([]map[string]interface{}, len(orig)) + for i, v := range orig { + typed[i] = translate(v).(map[string]interface{}) + } + return typed + case []map[string]interface{}: + typed := make([]map[string]interface{}, len(orig)) + for i, v := range orig { + typed[i] = translate(v).(map[string]interface{}) + } + return typed + case []interface{}: + typed := make([]interface{}, len(orig)) + for i, v := range orig { + typed[i] = translate(v) + } + return tag("array", typed) + case time.Time: + return tag("datetime", orig.Format("2006-01-02T15:04:05Z")) + case bool: + return tag("bool", fmt.Sprintf("%v", orig)) + case int64: + return tag("integer", fmt.Sprintf("%d", orig)) + case float64: + return tag("float", fmt.Sprintf("%v", orig)) + case string: + return tag("string", orig) + } + + panic(fmt.Sprintf("Unknown type: %T", tomlData)) +} + +func tag(typeName string, data interface{}) map[string]interface{} { + return map[string]interface{}{ + "type": typeName, + "value": data, + } +} diff --git a/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go b/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go new file mode 100644 index 00000000..b2d6fc67 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go @@ -0,0 +1,72 @@ +// Tomljson reads TOML and converts to JSON. 
+// +// Usage: +// cat file.toml | tomljson > file.json +// tomljson file1.toml > file.json +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "os" + + "github.com/pelletier/go-toml" +) + +func main() { + flag.Usage = func() { + fmt.Fprintln(os.Stderr, `tomljson can be used in two ways: +Writing to STDIN and reading from STDOUT: + cat file.toml | tomljson > file.json + +Reading from a file name: + tomljson file.toml +`) + } + flag.Parse() + os.Exit(processMain(flag.Args(), os.Stdin, os.Stdout, os.Stderr)) +} + +func processMain(files []string, defaultInput io.Reader, output io.Writer, errorOutput io.Writer) int { + // read from stdin and print to stdout + inputReader := defaultInput + + if len(files) > 0 { + var err error + inputReader, err = os.Open(files[0]) + if err != nil { + printError(err, errorOutput) + return -1 + } + } + s, err := reader(inputReader) + if err != nil { + printError(err, errorOutput) + return -1 + } + io.WriteString(output, s+"\n") + return 0 +} + +func printError(err error, output io.Writer) { + io.WriteString(output, err.Error()+"\n") +} + +func reader(r io.Reader) (string, error) { + tree, err := toml.LoadReader(r) + if err != nil { + return "", err + } + return mapToJSON(tree) +} + +func mapToJSON(tree *toml.Tree) (string, error) { + treeMap := tree.ToMap() + bytes, err := json.MarshalIndent(treeMap, "", " ") + if err != nil { + return "", err + } + return string(bytes[:]), nil +} diff --git a/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go b/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go new file mode 100644 index 00000000..36c7e375 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go @@ -0,0 +1,66 @@ +// Tomll is a linter for TOML +// +// Usage: +// cat file.toml | tomll > file_linted.toml +// tomll file1.toml file2.toml # lint the two files in place +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/pelletier/go-toml" +) + +func main() { + flag.Usage = func() { + fmt.Fprintln(os.Stderr, `tomll can be used in two ways: +Writing to STDIN and reading from STDOUT: + cat file.toml | tomll > file.toml + +Reading and updating a list of files: + tomll a.toml b.toml c.toml + +When given a list of files, tomll will modify all files in place without asking. +`) + } + flag.Parse() + // read from stdin and print to stdout + if flag.NArg() == 0 { + s, err := lintReader(os.Stdin) + if err != nil { + io.WriteString(os.Stderr, err.Error()) + os.Exit(-1) + } + io.WriteString(os.Stdout, s) + } else { + // otherwise modify a list of files + for _, filename := range flag.Args() { + s, err := lintFile(filename) + if err != nil { + io.WriteString(os.Stderr, err.Error()) + os.Exit(-1) + } + ioutil.WriteFile(filename, []byte(s), 0644) + } + } +} + +func lintFile(filename string) (string, error) { + tree, err := toml.LoadFile(filename) + if err != nil { + return "", err + } + return tree.String(), nil +} + +func lintReader(r io.Reader) (string, error) { + tree, err := toml.LoadReader(r) + if err != nil { + return "", err + } + return tree.String(), nil +} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go new file mode 100644 index 00000000..d5fd98c0 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -0,0 +1,23 @@ +// Package toml is a TOML parser and manipulation library. 
+// +// This version supports the specification as described in +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md +// +// Marshaling +// +// Go-toml can marshal and unmarshal TOML documents from and to data +// structures. +// +// TOML document as a tree +// +// Go-toml can operate on a TOML document as a tree. Use one of the Load* +// functions to parse TOML data and obtain a Tree instance, then one of its +// methods to manipulate the tree. +// +// JSONPath-like queries +// +// The package github.com/pelletier/go-toml/query implements a system +// similar to JSONPath to quickly retrieve elements of a TOML document using a +// single expression. See the package documentation for more information. +// +package toml diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go new file mode 100644 index 00000000..14570c8d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.go @@ -0,0 +1,31 @@ +// +build gofuzz + +package toml + +func Fuzz(data []byte) int { + tree, err := LoadBytes(data) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + str, err := tree.ToTomlString() + if err != nil { + if str != "" { + panic(`str must be "" if there is an error`) + } + panic(err) + } + + tree, err = Load(str) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + return 1 +} diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go new file mode 100644 index 00000000..284db646 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -0,0 +1,85 @@ +// Parsing keys handling both bare and quoted keys. + +package toml + +import ( + "bytes" + "errors" + "fmt" + "unicode" +) + +// Convert the bare key group string to an array. +// The input supports double quotation to allow "." inside the key name, +// but escape sequences are not supported. Lexers must unescape them beforehand. 
+func parseKey(key string) ([]string, error) { + groups := []string{} + var buffer bytes.Buffer + inQuotes := false + wasInQuotes := false + ignoreSpace := true + expectDot := false + + for _, char := range key { + if ignoreSpace { + if char == ' ' { + continue + } + ignoreSpace = false + } + switch char { + case '"': + if inQuotes { + groups = append(groups, buffer.String()) + buffer.Reset() + wasInQuotes = true + } + inQuotes = !inQuotes + expectDot = false + case '.': + if inQuotes { + buffer.WriteRune(char) + } else { + if !wasInQuotes { + if buffer.Len() == 0 { + return nil, errors.New("empty table key") + } + groups = append(groups, buffer.String()) + buffer.Reset() + } + ignoreSpace = true + expectDot = false + wasInQuotes = false + } + case ' ': + if inQuotes { + buffer.WriteRune(char) + } else { + expectDot = true + } + default: + if !inQuotes && !isValidBareChar(char) { + return nil, fmt.Errorf("invalid bare character: %c", char) + } + if !inQuotes && expectDot { + return nil, errors.New("what?") + } + buffer.WriteRune(char) + expectDot = false + } + } + if inQuotes { + return nil, errors.New("mismatched quotes") + } + if buffer.Len() > 0 { + groups = append(groups, buffer.String()) + } + if len(groups) == 0 { + return nil, errors.New("empty key") + } + return groups, nil +} + +func isValidBareChar(r rune) bool { + return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r) +} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go new file mode 100644 index 00000000..d11de428 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -0,0 +1,750 @@ +// TOML lexer. +// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package toml + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +var dateRegexp *regexp.Regexp + +// Define state functions +type tomlLexStateFn func() tomlLexStateFn + +// Define lexer +type tomlLexer struct { + inputIdx int + input []rune // Textual source + currentTokenStart int + currentTokenStop int + tokens []token + depth int + line int + col int + endbufferLine int + endbufferCol int +} + +// Basic read operations on input + +func (l *tomlLexer) read() rune { + r := l.peek() + if r == '\n' { + l.endbufferLine++ + l.endbufferCol = 1 + } else { + l.endbufferCol++ + } + l.inputIdx++ + return r +} + +func (l *tomlLexer) next() rune { + r := l.read() + + if r != eof { + l.currentTokenStop++ + } + return r +} + +func (l *tomlLexer) ignore() { + l.currentTokenStart = l.currentTokenStop + l.line = l.endbufferLine + l.col = l.endbufferCol +} + +func (l *tomlLexer) skip() { + l.next() + l.ignore() +} + +func (l *tomlLexer) fastForward(n int) { + for i := 0; i < n; i++ { + l.next() + } +} + +func (l *tomlLexer) emitWithValue(t tokenType, value string) { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: t, + val: value, + }) + l.ignore() +} + +func (l *tomlLexer) emit(t tokenType) { + l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) +} + +func (l *tomlLexer) peek() rune { + if l.inputIdx >= len(l.input) { + return eof + } + return l.input[l.inputIdx] +} + +func (l *tomlLexer) peekString(size int) string { + maxIdx := len(l.input) + upperIdx := l.inputIdx + size // FIXME: potential overflow + if upperIdx > maxIdx { + upperIdx = maxIdx + } + return string(l.input[l.inputIdx:upperIdx]) +} + +func (l *tomlLexer) follow(next string) bool { + return next == 
l.peekString(len(next)) +} + +// Error management + +func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + }) + return nil +} + +// State functions + +func (l *tomlLexer) lexVoid() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '[': + return l.lexTableKey + case '#': + return l.lexComment(l.lexVoid) + case '=': + return l.lexEqual + case '\r': + fallthrough + case '\n': + l.skip() + continue + } + + if isSpace(next) { + l.skip() + } + + if l.depth > 0 { + return l.lexRvalue + } + + if isKeyStartChar(next) { + return l.lexKey + } + + if next == eof { + l.next() + break + } + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexRvalue() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '.': + return l.errorf("cannot start float with a dot") + case '=': + return l.lexEqual + case '[': + l.depth++ + return l.lexLeftBracket + case ']': + l.depth-- + return l.lexRightBracket + case '{': + return l.lexLeftCurlyBrace + case '}': + return l.lexRightCurlyBrace + case '#': + return l.lexComment(l.lexRvalue) + case '"': + return l.lexString + case '\'': + return l.lexLiteralString + case ',': + return l.lexComma + case '\r': + fallthrough + case '\n': + l.skip() + if l.depth == 0 { + return l.lexVoid + } + return l.lexRvalue + case '_': + return l.errorf("cannot start number with underscore") + } + + if l.follow("true") { + return l.lexTrue + } + + if l.follow("false") { + return l.lexFalse + } + + if l.follow("inf") { + return l.lexInf + } + + if l.follow("nan") { + return l.lexNan + } + + if isSpace(next) { + l.skip() + continue + } + + if next == eof { + l.next() + break + } + + possibleDate := l.peekString(35) + dateMatch := dateRegexp.FindString(possibleDate) + if dateMatch != "" { + l.fastForward(len(dateMatch)) + return l.lexDate + } + + if next == '+' || next == '-' || isDigit(next) { + return l.lexNumber + } + + if isAlphanumeric(next) { + return l.lexKey + } + + return l.errorf("no value can start with %c", next) + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenLeftCurlyBrace) + return l.lexRvalue +} + +func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenRightCurlyBrace) + return l.lexRvalue +} + +func (l *tomlLexer) lexDate() tomlLexStateFn { + l.emit(tokenDate) + return l.lexRvalue +} + +func (l *tomlLexer) lexTrue() tomlLexStateFn { + l.fastForward(4) + l.emit(tokenTrue) + return l.lexRvalue +} + +func (l *tomlLexer) lexFalse() tomlLexStateFn { + l.fastForward(5) + l.emit(tokenFalse) + return l.lexRvalue +} + +func (l *tomlLexer) lexInf() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenInf) + return l.lexRvalue +} + +func (l *tomlLexer) lexNan() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenNan) + return l.lexRvalue +} + +func (l *tomlLexer) lexEqual() tomlLexStateFn { + l.next() + l.emit(tokenEqual) + return l.lexRvalue +} + +func (l *tomlLexer) lexComma() tomlLexStateFn { + l.next() + l.emit(tokenComma) + return l.lexRvalue +} + +// Parse the key and emits its value without escape sequences. +// bare keys, basic string keys and literal string keys are supported. 
+func (l *tomlLexer) lexKey() tomlLexStateFn { + growingString := "" + + for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { + if r == '"' { + l.next() + str, err := l.lexStringAsString(`"`, false, true) + if err != nil { + return l.errorf(err.Error()) + } + growingString += str + l.next() + continue + } else if r == '\'' { + l.next() + str, err := l.lexLiteralStringAsString(`'`, false) + if err != nil { + return l.errorf(err.Error()) + } + growingString += str + l.next() + continue + } else if r == '\n' { + return l.errorf("keys cannot contain new lines") + } else if isSpace(r) { + break + } else if !isValidBareChar(r) { + return l.errorf("keys cannot contain %c character", r) + } + growingString += string(r) + l.next() + } + l.emitWithValue(tokenKey, growingString) + return l.lexVoid +} + +func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { + return func() tomlLexStateFn { + for next := l.peek(); next != '\n' && next != eof; next = l.peek() { + if next == '\r' && l.follow("\r\n") { + break + } + l.next() + } + l.ignore() + return previousState + } +} + +func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { + l.next() + l.emit(tokenLeftBracket) + return l.lexRvalue +} + +func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { + growingString := "" + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + // find end of string + for { + if l.follow(terminator) { + return growingString, nil + } + + next := l.peek() + if next == eof { + break + } + growingString += string(l.next()) + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexLiteralString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := "'" + discardLeadingNewLine := false + if l.follow("''") { + l.skip() + l.skip() + terminator = "'''" + discardLeadingNewLine = true + } + + str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +// Lex a string and return the results as a string. +// Terminator is the substring indicating the end of the token. +// The resulting string does not include the terminator. 
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { + growingString := "" + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + for { + if l.follow(terminator) { + return growingString, nil + } + + if l.follow("\\") { + l.next() + switch l.peek() { + case '\r': + fallthrough + case '\n': + fallthrough + case '\t': + fallthrough + case ' ': + // skip all whitespace chars following backslash + for strings.ContainsRune("\r\n\t ", l.peek()) { + l.next() + } + case '"': + growingString += "\"" + l.next() + case 'n': + growingString += "\n" + l.next() + case 'b': + growingString += "\b" + l.next() + case 'f': + growingString += "\f" + l.next() + case '/': + growingString += "/" + l.next() + case 't': + growingString += "\t" + l.next() + case 'r': + growingString += "\r" + l.next() + case '\\': + growingString += "\\" + l.next() + case 'u': + l.next() + code := "" + for i := 0; i < 4; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code = code + string(c) + } + intcode, err := strconv.ParseInt(code, 16, 32) + if err != nil { + return "", errors.New("invalid unicode escape: \\u" + code) + } + growingString += string(rune(intcode)) + case 'U': + l.next() + code := "" + for i := 0; i < 8; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code = code + string(c) + } + intcode, err := strconv.ParseInt(code, 16, 64) + if err != nil { + return "", errors.New("invalid unicode escape: \\U" + code) + } + growingString += string(rune(intcode)) + default: + return "", errors.New("invalid escape sequence: \\" + string(l.peek())) + } + } else { + r := l.peek() + + if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) { + return "", fmt.Errorf("unescaped control character %U", r) + } + l.next() + growingString += string(r) + } + + if l.peek() == eof { + break + } + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := `"` + discardLeadingNewLine := false + acceptNewLines := false + if l.follow(`""`) { + l.skip() + l.skip() + terminator = `"""` + discardLeadingNewLine = true + acceptNewLines = true + } + + str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) + + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +func (l *tomlLexer) lexTableKey() tomlLexStateFn { + l.next() + + if l.peek() == '[' { + // token '[[' signifies an array of tables + l.next() + l.emit(tokenDoubleLeftBracket) + return l.lexInsideTableArrayKey + } + // vanilla table key + l.emit(tokenLeftBracket) + return l.lexInsideTableKey +} + +// Parse the key till "]]", but only bare keys are supported +func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroupArray) + } + l.next() + if l.peek() != ']' { + break + } + l.next() + l.emit(tokenDoubleRightBracket) + return l.lexVoid + case '[': + return l.errorf("table array key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table array key") +} + +// Parse the key till "]" but only 
bare keys are supported +func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroup) + } + l.next() + l.emit(tokenRightBracket) + return l.lexVoid + case '[': + return l.errorf("table key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table key") +} + +func (l *tomlLexer) lexRightBracket() tomlLexStateFn { + l.next() + l.emit(tokenRightBracket) + return l.lexRvalue +} + +type validRuneFn func(r rune) bool + +func isValidHexRune(r rune) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r rune) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r rune) bool { + return r == '0' || r == '1' || r == '_' +} + +func (l *tomlLexer) lexNumber() tomlLexStateFn { + r := l.peek() + + if r == '0' { + follow := l.peekString(2) + if len(follow) == 2 { + var isValidRune validRuneFn + switch follow[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { + return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) + } + } + + if isValidRune != nil { + l.next() + l.next() + digitSeen := false + for { + next := l.peek() + if !isValidRune(next) { + break + } + digitSeen = true + l.next() + } + + if !digitSeen { + return l.errorf("number needs at least one digit") + } + + l.emit(tokenInteger) + + return l.lexRvalue + } + } + } + + if r == '+' || r == '-' { + l.next() + if l.follow("inf") { + return l.lexInf + } + if l.follow("nan") { + return l.lexNan + } + } + + pointSeen := false + expSeen := false + digitSeen := false + for { + next := l.peek() + if next == '.' 
{ + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + l.next() + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if next == 'e' || next == 'E' { + expSeen = true + l.next() + r := l.peek() + if r == '+' || r == '-' { + l.next() + } + } else if isDigit(next) { + digitSeen = true + l.next() + } else if next == '_' { + l.next() + } else { + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen || expSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexRvalue +} + +func (l *tomlLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } +} + +func init() { + dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`) +} + +// Entry point +func lexToml(inputBytes []byte) []token { + runes := bytes.Runes(inputBytes) + l := &tomlLexer{ + input: runes, + tokens: make([]token, 0, 256), + line: 1, + col: 1, + endbufferLine: 1, + endbufferCol: 1, + } + l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go new file mode 100644 index 00000000..f584ba4e --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -0,0 +1,600 @@ +package toml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +type tomlOpts struct { + name string + comment string + commented bool + include bool + omitempty bool +} + +type encOpts struct { + quoteMapKeys bool + arraysOneElementPerLine bool +} + +var encOptsDefaults = encOpts{ + quoteMapKeys: false, +} + +var timeType = reflect.TypeOf(time.Time{}) +var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() + +// Check if the given marshall type maps to a Tree primitive +func isPrimitive(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isPrimitive(mtype.Elem()) + case reflect.Bool: + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Struct: + return mtype == timeType || isCustomMarshaler(mtype) + default: + return false + } +} + +// Check if the given marshall type maps to a Tree slice +func isTreeSlice(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Slice: + return !isOtherSlice(mtype) + default: + return false + } +} + +// Check if the given marshall type maps to a non-Tree slice +func isOtherSlice(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isOtherSlice(mtype.Elem()) + case reflect.Slice: + return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem()) + default: + return false + } +} + +// Check if the given marshall type maps to a Tree +func isTree(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Map: + return true + case reflect.Struct: + return !isPrimitive(mtype) + default: + return false + } +} + +func isCustomMarshaler(mtype reflect.Type) bool { + return mtype.Implements(marshalerType) +} + +func callCustomMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(Marshaler).MarshalTOML() +} + +// Marshaler is the interface implemented by types that +// can 
marshal themselves into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +/* +Marshal returns the TOML encoding of v. Behavior is similar to the Go json +encoder, except that there is no concept of a Marshaler interface or MarshalTOML +function for sub-structs, and currently only definite types can be marshaled +(i.e. no `interface{}`). + +The following struct annotations are supported: + + toml:"Field" Overrides the field's name to output. + omitempty When set, empty values and groups are not emitted. + comment:"comment" Emits a # comment on the same line. This supports new lines. + commented:"true" Emits the value as commented. + +Note that pointers are automatically assigned the "omitempty" option, as TOML +explicitly does not handle null values (saying instead the label should be +dropped). + +Tree structural types and corresponding marshal types: + + *Tree (*)struct, (*)map[string]interface{} + []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} + []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) + interface{} (*)primitive + +Tree primitive types and corresponding marshal types: + + uint64 uint, uint8-uint64, pointers to same + int64 int, int8-uint64, pointers to same + float64 float32, float64, pointers to same + string string, pointers to same + bool bool, pointers to same + time.Time time.Time{}, pointers to same +*/ +func Marshal(v interface{}) ([]byte, error) { + return NewEncoder(nil).marshal(v) +} + +// Encoder writes TOML values to an output stream. +type Encoder struct { + w io.Writer + encOpts +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + encOpts: encOptsDefaults, + } +} + +// Encode writes the TOML encoding of v to the stream. +// +// See the documentation for Marshal for details. +func (e *Encoder) Encode(v interface{}) error { + b, err := e.marshal(v) + if err != nil { + return err + } + if _, err := e.w.Write(b); err != nil { + return err + } + return nil +} + +// QuoteMapKeys sets up the encoder to encode +// maps with string type keys with quoted TOML keys. +// +// This relieves the character limitations on map keys. +func (e *Encoder) QuoteMapKeys(v bool) *Encoder { + e.quoteMapKeys = v + return e +} + +// ArraysWithOneElementPerLine sets up the encoder to encode arrays +// with more than one element on multiple lines instead of one. 
+// +// For example: +// +// A = [1,2,3] +// +// Becomes +// +// A = [ +// 1, +// 2, +// 3, +// ] +func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { + e.arraysOneElementPerLine = v + return e +} + +func (e *Encoder) marshal(v interface{}) ([]byte, error) { + mtype := reflect.TypeOf(v) + if mtype.Kind() != reflect.Struct { + return []byte{}, errors.New("Only a struct can be marshaled to TOML") + } + sval := reflect.ValueOf(v) + if isCustomMarshaler(mtype) { + return callCustomMarshaler(sval) + } + t, err := e.valueToTree(mtype, sval) + if err != nil { + return []byte{}, err + } + + var buf bytes.Buffer + _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine) + + return buf.Bytes(), err +} + +// Convert given marshal struct or map value to toml tree +func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { + if mtype.Kind() == reflect.Ptr { + return e.valueToTree(mtype.Elem(), mval.Elem()) + } + tval := newTree() + switch mtype.Kind() { + case reflect.Struct: + for i := 0; i < mtype.NumField(); i++ { + mtypef, mvalf := mtype.Field(i), mval.Field(i) + opts := tomlOptions(mtypef) + if opts.include && (!opts.omitempty || !isZero(mvalf)) { + val, err := e.valueToToml(mtypef.Type, mvalf) + if err != nil { + return nil, err + } + tval.SetWithComment(opts.name, opts.comment, opts.commented, val) + } + } + case reflect.Map: + for _, key := range mval.MapKeys() { + mvalf := mval.MapIndex(key) + val, err := e.valueToToml(mtype.Elem(), mvalf) + if err != nil { + return nil, err + } + if e.quoteMapKeys { + keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine) + if err != nil { + return nil, err + } + tval.SetPath([]string{keyStr}, val) + } else { + tval.Set(key.String(), val) + } + } + } + return tval, nil +} + +// Convert given marshal slice to slice of Toml trees +func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { + tval := make([]*Tree, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal slice to slice of toml values +func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + tval := make([]interface{}, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal value to toml value +func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + if mtype.Kind() == reflect.Ptr { + return e.valueToToml(mtype.Elem(), mval.Elem()) + } + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTree(mtype): + return e.valueToTree(mtype, mval) + case isTreeSlice(mtype): + return e.valueToTreeSlice(mtype, mval) + case isOtherSlice(mtype): + return e.valueToOtherSlice(mtype, mval) + default: + switch mtype.Kind() { + case reflect.Bool: + return mval.Bool(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return mval.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return mval.Uint(), nil + case reflect.Float32, reflect.Float64: + return mval.Float(), nil + case reflect.String: + return mval.String(), nil + case reflect.Struct: + return 
mval.Interface().(time.Time), nil + default: + return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) + } + } +} + +// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. +// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for +// sub-structs, and only definite types can be unmarshaled. +func (t *Tree) Unmarshal(v interface{}) error { + d := Decoder{tval: t} + return d.unmarshal(v) +} + +// Marshal returns the TOML encoding of Tree. +// See Marshal() documentation for types mapping table. +func (t *Tree) Marshal() ([]byte, error) { + var buf bytes.Buffer + err := NewEncoder(&buf).Encode(t) + return buf.Bytes(), err +} + +// Unmarshal parses the TOML-encoded data and stores the result in the value +// pointed to by v. Behavior is similar to the Go json encoder, except that there +// is no concept of an Unmarshaler interface or UnmarshalTOML function for +// sub-structs, and currently only definite types can be unmarshaled to (i.e. no +// `interface{}`). +// +// The following struct annotations are supported: +// +// toml:"Field" Overrides the field's name to map to. +// +// See Marshal() documentation for types mapping table. +func Unmarshal(data []byte, v interface{}) error { + t, err := LoadReader(bytes.NewReader(data)) + if err != nil { + return err + } + return t.Unmarshal(v) +} + +// Decoder reads and decodes TOML values from an input stream. +type Decoder struct { + r io.Reader + tval *Tree + encOpts +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + encOpts: encOptsDefaults, + } +} + +// Decode reads a TOML-encoded value from it's input +// and unmarshals it in the value pointed at by v. +// +// See the documentation for Marshal for details. 
+func (d *Decoder) Decode(v interface{}) error { + var err error + d.tval, err = LoadReader(d.r) + if err != nil { + return err + } + return d.unmarshal(v) +} + +func (d *Decoder) unmarshal(v interface{}) error { + mtype := reflect.TypeOf(v) + if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { + return errors.New("Only a pointer to struct can be unmarshaled from TOML") + } + + sval, err := d.valueFromTree(mtype.Elem(), d.tval) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(sval) + return nil +} + +// Convert toml tree to marshal struct or map, using marshal type +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval) + } + var mval reflect.Value + switch mtype.Kind() { + case reflect.Struct: + mval = reflect.New(mtype).Elem() + for i := 0; i < mtype.NumField(); i++ { + mtypef := mtype.Field(i) + opts := tomlOptions(mtypef) + if opts.include { + baseKey := opts.name + keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)} + for _, key := range keysToTry { + exists := tval.Has(key) + if !exists { + continue + } + val := tval.Get(key) + mvalf, err := d.valueFromToml(mtypef.Type, val) + if err != nil { + return mval, formatError(err, tval.GetPosition(key)) + } + mval.Field(i).Set(mvalf) + break + } + } + } + case reflect.Map: + mval = reflect.MakeMap(mtype) + for _, key := range tval.Keys() { + // TODO: path splits key + val := tval.GetPath([]string{key}) + mvalf, err := d.valueFromToml(mtype.Elem(), val) + if err != nil { + return mval, formatError(err, tval.GetPosition(key)) + } + mval.SetMapIndex(reflect.ValueOf(key), mvalf) + } + } + return mval, nil +} + +// Convert toml value to marshal struct/map slice, using marshal type +func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { + mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + for i := 0; i < len(tval); i++ { + val, err := d.valueFromTree(mtype.Elem(), tval[i]) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { + mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + for i := 0; i < len(tval); i++ { + val, err := d.valueFromToml(mtype.Elem(), tval[i]) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal value, using marshal type +func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval) + } + + switch tval.(type) { + case *Tree: + if isTree(mtype) { + return d.valueFromTree(mtype, tval.(*Tree)) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) + case []*Tree: + if isTreeSlice(mtype) { + return d.valueFromTreeSlice(mtype, tval.([]*Tree)) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) + case []interface{}: + if isOtherSlice(mtype) { + return d.valueFromOtherSlice(mtype, tval.([]interface{})) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) + default: + switch mtype.Kind() { + case reflect.Bool, reflect.Struct: + val := reflect.ValueOf(tval) + // if this passes for when mtype is 
reflect.Struct, tval is a time.Time + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.String: + val := reflect.ValueOf(tval) + // stupidly, int64 is convertible to string. So special case this. + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if val.Int() < 0 { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Float32, reflect.Float64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + default: + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + } + } +} + +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val, err := d.valueFromToml(mtype.Elem(), tval) + if err != nil { + return reflect.ValueOf(nil), err + } + mval := reflect.New(mtype.Elem()) + mval.Elem().Set(val) + return mval, nil +} + +func tomlOptions(vf reflect.StructField) tomlOpts { + tag := vf.Tag.Get("toml") + parse := strings.Split(tag, ",") + var comment string + if c := vf.Tag.Get("comment"); c != "" { + comment = c + } + commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) + result := tomlOpts{name: vf.Name, comment: comment, commented: commented, include: true, omitempty: false} + if parse[0] != "" { + if parse[0] == "-" && len(parse) == 1 { + result.include = false + } else { + result.name = strings.Trim(parse[0], " ") + } + } + if vf.PkgPath != "" { + result.include = false + } + if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { + result.omitempty = true + } + if vf.Type.Kind() == reflect.Ptr { + result.omitempty = true + } + return result +} + +func isZero(val reflect.Value) bool { + switch val.Type().Kind() { + case reflect.Map: + fallthrough + case reflect.Array: + fallthrough + case reflect.Slice: + return val.Len() == 0 + default: + return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) + } +} + +func formatError(err error, pos 
Position) error { + if err.Error()[0] == '(' { // Error already contains position information + return err + } + return fmt.Errorf("%s: %s", pos, err) +} diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go new file mode 100644 index 00000000..2d27599a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -0,0 +1,430 @@ +// TOML Parser. + +package toml + +import ( + "errors" + "fmt" + "math" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +type tomlParser struct { + flowIdx int + flow []token + tree *Tree + currentTable []string + seenTableKeys []string +} + +type tomlParserStateFn func() tomlParserStateFn + +// Formats and panics an error message based on a token +func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *tomlParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *tomlParser) peek() *token { + if p.flowIdx >= len(p.flow) { + return nil + } + return &p.flow[p.flowIdx] +} + +func (p *tomlParser) assume(typ tokenType) { + tok := p.getToken() + if tok == nil { + p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) + } + if tok.typ != typ { + p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) + } +} + +func (p *tomlParser) getToken() *token { + tok := p.peek() + if tok == nil { + return nil + } + p.flowIdx++ + return tok +} + +func (p *tomlParser) parseStart() tomlParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenDoubleLeftBracket: + return p.parseGroupArray + case tokenLeftBracket: + return p.parseGroup + case tokenKey: + return p.parseAssign + case tokenEOF: + return nil + default: + p.raiseError(tok, "unexpected token") + } + return nil +} + +func (p *tomlParser) parseGroupArray() tomlParserStateFn { + startToken := p.getToken() // discard the [[ + key := p.getToken() + if key.typ != tokenKeyGroupArray { + p.raiseError(key, "unexpected token %s, was expecting a table array key", key) + } + + // get or create table array element at the indicated part in the path + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries + destTree := p.tree.GetPath(keys) + var array []*Tree + if destTree == nil { + array = make([]*Tree, 0) + } else if target, ok := destTree.([]*Tree); ok && target != nil { + array = destTree.([]*Tree) + } else { + p.raiseError(key, "key %s is already assigned and not of type table array", key) + } + p.currentTable = keys + + // add a new tree to the end of the table array + newTree := newTree() + newTree.position = startToken.Position + array = append(array, newTree) + p.tree.SetPath(p.currentTable, array) + + // remove all keys that were children of this table array + prefix := key.val + "." + found := false + for ii := 0; ii < len(p.seenTableKeys); { + tableKey := p.seenTableKeys[ii] + if strings.HasPrefix(tableKey, prefix) { + p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) 
+ } else { + found = (tableKey == key.val) + ii++ + } + } + + // keep this key name from use by other kinds of assignments + if !found { + p.seenTableKeys = append(p.seenTableKeys, key.val) + } + + // move to next parser state + p.assume(tokenDoubleRightBracket) + return p.parseStart +} + +func (p *tomlParser) parseGroup() tomlParserStateFn { + startToken := p.getToken() // discard the [ + key := p.getToken() + if key.typ != tokenKeyGroup { + p.raiseError(key, "unexpected token %s, was expecting a table key", key) + } + for _, item := range p.seenTableKeys { + if item == key.val { + p.raiseError(key, "duplicated tables") + } + } + + p.seenTableKeys = append(p.seenTableKeys, key.val) + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + if err := p.tree.createSubTree(keys, startToken.Position); err != nil { + p.raiseError(key, "%s", err) + } + p.assume(tokenRightBracket) + p.currentTable = keys + return p.parseStart +} + +func (p *tomlParser) parseAssign() tomlParserStateFn { + key := p.getToken() + p.assume(tokenEqual) + + value := p.parseRvalue() + var tableKey []string + if len(p.currentTable) > 0 { + tableKey = p.currentTable + } else { + tableKey = []string{} + } + + // find the table to assign, looking out for arrays of tables + var targetNode *Tree + switch node := p.tree.GetPath(tableKey).(type) { + case []*Tree: + targetNode = node[len(node)-1] + case *Tree: + targetNode = node + default: + p.raiseError(key, "Unknown table type for path: %s", + strings.Join(tableKey, ".")) + } + + // assign value to the found table + keyVals := []string{key.val} + if len(keyVals) != 1 { + p.raiseError(key, "Invalid key") + } + keyVal := keyVals[0] + localKey := []string{keyVal} + finalKey := append(tableKey, keyVal) + if targetNode.GetPath(localKey) != nil { + p.raiseError(key, "The following key was defined twice: %s", + strings.Join(finalKey, ".")) + } + var toInsert interface{} + + switch value.(type) { + case *Tree, []*Tree: + toInsert = value + default: + toInsert = &tomlValue{value: value, position: key.Position} + } + targetNode.values[keyVal] = toInsert + return p.parseStart +} + +var numberUnderscoreInvalidRegexp *regexp.Regexp +var hexNumberUnderscoreInvalidRegexp *regexp.Regexp + +func numberContainsInvalidUnderscore(value string) error { + if numberUnderscoreInvalidRegexp.MatchString(value) { + return errors.New("invalid use of _ in number") + } + return nil +} + +func hexNumberContainsInvalidUnderscore(value string) error { + if hexNumberUnderscoreInvalidRegexp.MatchString(value) { + return errors.New("invalid use of _ in hex number") + } + return nil +} + +func cleanupNumberToken(value string) string { + cleanedVal := strings.Replace(value, "_", "", -1) + return cleanedVal +} + +func (p *tomlParser) parseRvalue() interface{} { + tok := p.getToken() + if tok == nil || tok.typ == tokenEOF { + p.raiseError(tok, "expecting a value") + } + + switch tok.typ { + case tokenString: + return tok.val + case tokenTrue: + return true + case tokenFalse: + return false + case tokenInf: + if tok.val[0] == '-' { + return math.Inf(-1) + } + return math.Inf(1) + case tokenNan: + return math.NaN() + case tokenInteger: + cleanedVal := cleanupNumberToken(tok.val) + var err error + var val int64 + if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { + switch cleanedVal[1] { + case 'x': + err = hexNumberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) + case 
'o': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) + case 'b': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) + default: + panic("invalid base") // the lexer should catch this first + } + } else { + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal, 10, 64) + } + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenFloat: + err := numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + cleanedVal := cleanupNumberToken(tok.val) + val, err := strconv.ParseFloat(cleanedVal, 64) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenDate: + val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLeftBracket: + return p.parseArray() + case tokenLeftCurlyBrace: + return p.parseInlineTable() + case tokenEqual: + p.raiseError(tok, "cannot have multiple equals for the same key") + case tokenError: + p.raiseError(tok, "%s", tok) + } + + p.raiseError(tok, "never reached") + + return nil +} + +func tokenIsComma(t *token) bool { + return t != nil && t.typ == tokenComma +} + +func (p *tomlParser) parseInlineTable() *Tree { + tree := newTree() + var previous *token +Loop: + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated inline table") + } + switch follow.typ { + case tokenRightCurlyBrace: + p.getToken() + break Loop + case tokenKey: + if !tokenIsComma(previous) && previous != nil { + p.raiseError(follow, "comma expected between fields in inline table") + } + key := p.getToken() + p.assume(tokenEqual) + value := p.parseRvalue() + tree.Set(key.val, value) + case tokenComma: + if previous == nil { + p.raiseError(follow, "inline table cannot start with a comma") + } + if tokenIsComma(previous) { + p.raiseError(follow, "need field between two commas in inline table") + } + p.getToken() + default: + p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) + } + previous = follow + } + if tokenIsComma(previous) { + p.raiseError(previous, "trailing comma at the end of inline table") + } + return tree +} + +func (p *tomlParser) parseArray() interface{} { + var array []interface{} + arrayType := reflect.TypeOf(nil) + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ == tokenRightBracket { + p.getToken() + break + } + val := p.parseRvalue() + if arrayType == nil { + arrayType = reflect.TypeOf(val) + } + if reflect.TypeOf(val) != arrayType { + p.raiseError(follow, "mixed types in array") + } + array = append(array, val) + follow = p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ != tokenRightBracket && follow.typ != tokenComma { + p.raiseError(follow, "missing comma") + } + if follow.typ == tokenComma { + p.getToken() + } + } + // An array of Trees is actually an array of inline + // tables, which is a shorthand for a table array. If the + // array was not converted from []interface{} to []*Tree, + // the two notations would not be equivalent. 
+ if arrayType == reflect.TypeOf(newTree()) { + tomlArray := make([]*Tree, len(array)) + for i, v := range array { + tomlArray[i] = v.(*Tree) + } + return tomlArray + } + return array +} + +func parseToml(flow []token) *Tree { + result := newTree() + result.position = Position{1, 1} + parser := &tomlParser{ + flowIdx: 0, + flow: flow, + tree: result, + currentTable: make([]string, 0), + seenTableKeys: make([]string, 0), + } + parser.run() + return result +} + +func init() { + numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`) + hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`) +} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go new file mode 100644 index 00000000..c17bff87 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/position.go @@ -0,0 +1,29 @@ +// Position support for go-toml + +package toml + +import ( + "fmt" +) + +// Position of a document element within a TOML document. +// +// Line and Col are both 1-indexed positions for the element's line number and +// column number, respectively. Values of zero or less will cause Invalid(), +// to return true. +type Position struct { + Line int // line within the document + Col int // column within the line +} + +// String representation of the position. +// Displays 1-indexed line and column numbers. +func (p Position) String() string { + return fmt.Sprintf("(%d, %d)", p.Line, p.Col) +} + +// Invalid returns whether or not the position is valid (i.e. with negative or +// null values) +func (p Position) Invalid() bool { + return p.Line <= 0 || p.Col <= 0 +} diff --git a/vendor/github.com/pelletier/go-toml/query/doc.go b/vendor/github.com/pelletier/go-toml/query/doc.go new file mode 100644 index 00000000..ed63c110 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/doc.go @@ -0,0 +1,175 @@ +// Package query performs JSONPath-like queries on a TOML document. +// +// The query path implementation is based loosely on the JSONPath specification: +// http://goessner.net/articles/JsonPath/. +// +// The idea behind a query path is to allow quick access to any element, or set +// of elements within TOML document, with a single expression. +// +// result, err := query.CompileAndExecute("$.foo.bar.baz", tree) +// +// This is roughly equivalent to: +// +// next := tree.Get("foo") +// if next != nil { +// next = next.Get("bar") +// if next != nil { +// next = next.Get("baz") +// } +// } +// result := next +// +// err is nil if any parsing exception occurs. +// +// If no node in the tree matches the query, result will simply contain an empty list of +// items. +// +// As illustrated above, the query path is much more efficient, especially since +// the structure of the TOML file can vary. Rather than making assumptions about +// a document's structure, a query allows the programmer to make structured +// requests into the document, and get zero or more values as a result. +// +// Query syntax +// +// The syntax of a query begins with a root token, followed by any number +// sub-expressions: +// +// $ +// Root of the TOML tree. This must always come first. +// .name +// Selects child of this node, where 'name' is a TOML key +// name. +// ['name'] +// Selects child of this node, where 'name' is a string +// containing a TOML key name. +// [index] +// Selcts child array element at 'index'. +// ..expr +// Recursively selects all children, filtered by an a union, +// index, or slice expression. 
+// ..* +// Recursive selection of all nodes at this point in the +// tree. +// .* +// Selects all children of the current node. +// [expr,expr] +// Union operator - a logical 'or' grouping of two or more +// sub-expressions: index, key name, or filter. +// [start:end:step] +// Slice operator - selects array elements from start to +// end-1, at the given step. All three arguments are +// optional. +// [?(filter)] +// Named filter expression - the function 'filter' is +// used to filter children at this node. +// +// Query Indexes And Slices +// +// Index expressions perform no bounds checking, and will contribute no +// values to the result set if the provided index or index range is invalid. +// Negative indexes represent values from the end of the array, counting backwards. +// +// // select the last index of the array named 'foo' +// query.CompileAndExecute("$.foo[-1]", tree) +// +// Slice expressions are supported, by using ':' to separate a start/end index pair. +// +// // select up to the first five elements in the array +// query.CompileAndExecute("$.foo[0:5]", tree) +// +// Slice expressions also allow negative indexes for the start and stop +// arguments. +// +// // select all array elements. +// query.CompileAndExecute("$.foo[0:-1]", tree) +// +// Slice expressions may have an optional stride/step parameter: +// +// // select every other element +// query.CompileAndExecute("$.foo[0:-1:2]", tree) +// +// Slice start and end parameters are also optional: +// +// // these are all equivalent and select all the values in the array +// query.CompileAndExecute("$.foo[:]", tree) +// query.CompileAndExecute("$.foo[0:]", tree) +// query.CompileAndExecute("$.foo[:-1]", tree) +// query.CompileAndExecute("$.foo[0:-1:]", tree) +// query.CompileAndExecute("$.foo[::1]", tree) +// query.CompileAndExecute("$.foo[0::1]", tree) +// query.CompileAndExecute("$.foo[:-1:1]", tree) +// query.CompileAndExecute("$.foo[0:-1:1]", tree) +// +// Query Filters +// +// Query filters are used within a Union [,] or single Filter [] expression. +// A filter only allows nodes that qualify through to the next expression, +// and/or into the result set. +// +// // returns children of foo that are permitted by the 'bar' filter. +// query.CompileAndExecute("$.foo[?(bar)]", tree) +// +// There are several filters provided with the library: +// +// tree +// Allows nodes of type Tree. +// int +// Allows nodes of type int64. +// float +// Allows nodes of type float64. +// string +// Allows nodes of type string. +// time +// Allows nodes of type time.Time. +// bool +// Allows nodes of type bool. +// +// Query Results +// +// An executed query returns a Result object. This contains the nodes +// in the TOML tree that qualify the query expression. Position information +// is also available for each value in the set. +// +// // display the results of a query +// results := query.CompileAndExecute("$.foo.bar.baz", tree) +// for idx, value := results.Values() { +// fmt.Println("%v: %v", results.Positions()[idx], value) +// } +// +// Compiled Queries +// +// Queries may be executed directly on a Tree object, or compiled ahead +// of time and executed discretely. The former is more convenient, but has the +// penalty of having to recompile the query expression each time. 
+// +// // basic query +// results := query.CompileAndExecute("$.foo.bar.baz", tree) +// +// // compiled query +// query, err := toml.Compile("$.foo.bar.baz") +// results := query.Execute(tree) +// +// // run the compiled query again on a different tree +// moreResults := query.Execute(anotherTree) +// +// User Defined Query Filters +// +// Filter expressions may also be user defined by using the SetFilter() +// function on the Query object. The function must return true/false, which +// signifies if the passed node is kept or discarded, respectively. +// +// // create a query that references a user-defined filter +// query, _ := query.Compile("$[?(bazOnly)]") +// +// // define the filter, and assign it to the query +// query.SetFilter("bazOnly", func(node interface{}) bool{ +// if tree, ok := node.(*Tree); ok { +// return tree.Has("baz") +// } +// return false // reject all other node types +// }) +// +// // run the query +// query.Execute(tree) +// +package query diff --git a/vendor/github.com/pelletier/go-toml/query/lexer.go b/vendor/github.com/pelletier/go-toml/query/lexer.go new file mode 100644 index 00000000..2dc31940 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/lexer.go @@ -0,0 +1,357 @@ +// TOML JSONPath lexer. +// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package query + +import ( + "fmt" + "github.com/pelletier/go-toml" + "strconv" + "strings" + "unicode/utf8" +) + +// Lexer state function +type queryLexStateFn func() queryLexStateFn + +// Lexer definition +type queryLexer struct { + input string + start int + pos int + width int + tokens chan token + depth int + line int + col int + stringTerm string +} + +func (l *queryLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } + close(l.tokens) +} + +func (l *queryLexer) nextStart() { + // iterate by runes (utf8 characters) + // search for newlines and advance line/col counts + for i := l.start; i < l.pos; { + r, width := utf8.DecodeRuneInString(l.input[i:]) + if r == '\n' { + l.line++ + l.col = 1 + } else { + l.col++ + } + i += width + } + // advance start position to next token + l.start = l.pos +} + +func (l *queryLexer) emit(t tokenType) { + l.tokens <- token{ + Position: toml.Position{Line: l.line, Col: l.col}, + typ: t, + val: l.input[l.start:l.pos], + } + l.nextStart() +} + +func (l *queryLexer) emitWithValue(t tokenType, value string) { + l.tokens <- token{ + Position: toml.Position{Line: l.line, Col: l.col}, + typ: t, + val: value, + } + l.nextStart() +} + +func (l *queryLexer) next() rune { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + var r rune + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + return r +} + +func (l *queryLexer) ignore() { + l.nextStart() +} + +func (l *queryLexer) backup() { + l.pos -= l.width +} + +func (l *queryLexer) errorf(format string, args ...interface{}) queryLexStateFn { + l.tokens <- token{ + Position: toml.Position{Line: l.line, Col: l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + } + return nil +} + +func (l *queryLexer) peek() rune { + r := l.next() + l.backup() + return r +} + +func (l *queryLexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + l.backup() + return false +} + +func (l *queryLexer) follow(next string) bool { + return strings.HasPrefix(l.input[l.pos:], next) +} + +func (l *queryLexer) lexVoid() queryLexStateFn { + for { + next := l.peek() + switch next { + case 
'$': + l.pos++ + l.emit(tokenDollar) + continue + case '.': + if l.follow("..") { + l.pos += 2 + l.emit(tokenDotDot) + } else { + l.pos++ + l.emit(tokenDot) + } + continue + case '[': + l.pos++ + l.emit(tokenLeftBracket) + continue + case ']': + l.pos++ + l.emit(tokenRightBracket) + continue + case ',': + l.pos++ + l.emit(tokenComma) + continue + case '*': + l.pos++ + l.emit(tokenStar) + continue + case '(': + l.pos++ + l.emit(tokenLeftParen) + continue + case ')': + l.pos++ + l.emit(tokenRightParen) + continue + case '?': + l.pos++ + l.emit(tokenQuestion) + continue + case ':': + l.pos++ + l.emit(tokenColon) + continue + case '\'': + l.ignore() + l.stringTerm = string(next) + return l.lexString + case '"': + l.ignore() + l.stringTerm = string(next) + return l.lexString + } + + if isSpace(next) { + l.next() + l.ignore() + continue + } + + if isAlphanumeric(next) { + return l.lexKey + } + + if next == '+' || next == '-' || isDigit(next) { + return l.lexNumber + } + + if l.next() == eof { + break + } + + return l.errorf("unexpected char: '%v'", next) + } + l.emit(tokenEOF) + return nil +} + +func (l *queryLexer) lexKey() queryLexStateFn { + for { + next := l.peek() + if !isAlphanumeric(next) { + l.emit(tokenKey) + return l.lexVoid + } + + if l.next() == eof { + break + } + } + l.emit(tokenEOF) + return nil +} + +func (l *queryLexer) lexString() queryLexStateFn { + l.pos++ + l.ignore() + growingString := "" + + for { + if l.follow(l.stringTerm) { + l.emitWithValue(tokenString, growingString) + l.pos++ + l.ignore() + return l.lexVoid + } + + if l.follow("\\\"") { + l.pos++ + growingString += "\"" + } else if l.follow("\\'") { + l.pos++ + growingString += "'" + } else if l.follow("\\n") { + l.pos++ + growingString += "\n" + } else if l.follow("\\b") { + l.pos++ + growingString += "\b" + } else if l.follow("\\f") { + l.pos++ + growingString += "\f" + } else if l.follow("\\/") { + l.pos++ + growingString += "/" + } else if l.follow("\\t") { + l.pos++ + growingString += "\t" + } else if l.follow("\\r") { + l.pos++ + growingString += "\r" + } else if l.follow("\\\\") { + l.pos++ + growingString += "\\" + } else if l.follow("\\u") { + l.pos += 2 + code := "" + for i := 0; i < 4; i++ { + c := l.peek() + l.pos++ + if !isHexDigit(c) { + return l.errorf("unfinished unicode escape") + } + code = code + string(c) + } + l.pos-- + intcode, err := strconv.ParseInt(code, 16, 32) + if err != nil { + return l.errorf("invalid unicode escape: \\u" + code) + } + growingString += string(rune(intcode)) + } else if l.follow("\\U") { + l.pos += 2 + code := "" + for i := 0; i < 8; i++ { + c := l.peek() + l.pos++ + if !isHexDigit(c) { + return l.errorf("unfinished unicode escape") + } + code = code + string(c) + } + l.pos-- + intcode, err := strconv.ParseInt(code, 16, 32) + if err != nil { + return l.errorf("invalid unicode escape: \\u" + code) + } + growingString += string(rune(intcode)) + } else if l.follow("\\") { + l.pos++ + return l.errorf("invalid escape sequence: \\" + string(l.peek())) + } else { + growingString += string(l.peek()) + } + + if l.next() == eof { + break + } + } + + return l.errorf("unclosed string") +} + +func (l *queryLexer) lexNumber() queryLexStateFn { + l.ignore() + if !l.accept("+") { + l.accept("-") + } + pointSeen := false + digitSeen := false + for { + next := l.next() + if next == '.' 
{ + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if isDigit(next) { + digitSeen = true + } else { + l.backup() + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexVoid +} + +// Entry point +func lexQuery(input string) chan token { + l := &queryLexer{ + input: input, + tokens: make(chan token), + line: 1, + col: 1, + } + go l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/query/match.go b/vendor/github.com/pelletier/go-toml/query/match.go new file mode 100644 index 00000000..d7bb15a4 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/match.go @@ -0,0 +1,232 @@ +package query + +import ( + "fmt" + "github.com/pelletier/go-toml" +) + +// base match +type matchBase struct { + next pathFn +} + +func (f *matchBase) setNext(next pathFn) { + f.next = next +} + +// terminating functor - gathers results +type terminatingFn struct { + // empty +} + +func newTerminatingFn() *terminatingFn { + return &terminatingFn{} +} + +func (f *terminatingFn) setNext(next pathFn) { + // do nothing +} + +func (f *terminatingFn) call(node interface{}, ctx *queryContext) { + ctx.result.appendResult(node, ctx.lastPosition) +} + +// match single key +type matchKeyFn struct { + matchBase + Name string +} + +func newMatchKeyFn(name string) *matchKeyFn { + return &matchKeyFn{Name: name} +} + +func (f *matchKeyFn) call(node interface{}, ctx *queryContext) { + if array, ok := node.([]*toml.Tree); ok { + for _, tree := range array { + item := tree.Get(f.Name) + if item != nil { + ctx.lastPosition = tree.GetPosition(f.Name) + f.next.call(item, ctx) + } + } + } else if tree, ok := node.(*toml.Tree); ok { + item := tree.Get(f.Name) + if item != nil { + ctx.lastPosition = tree.GetPosition(f.Name) + f.next.call(item, ctx) + } + } +} + +// match single index +type matchIndexFn struct { + matchBase + Idx int +} + +func newMatchIndexFn(idx int) *matchIndexFn { + return &matchIndexFn{Idx: idx} +} + +func (f *matchIndexFn) call(node interface{}, ctx *queryContext) { + if arr, ok := node.([]interface{}); ok { + if f.Idx < len(arr) && f.Idx >= 0 { + if treesArray, ok := node.([]*toml.Tree); ok { + if len(treesArray) > 0 { + ctx.lastPosition = treesArray[0].Position() + } + } + f.next.call(arr[f.Idx], ctx) + } + } +} + +// filter by slicing +type matchSliceFn struct { + matchBase + Start, End, Step int +} + +func newMatchSliceFn(start, end, step int) *matchSliceFn { + return &matchSliceFn{Start: start, End: end, Step: step} +} + +func (f *matchSliceFn) call(node interface{}, ctx *queryContext) { + if arr, ok := node.([]interface{}); ok { + // adjust indexes for negative values, reverse ordering + realStart, realEnd := f.Start, f.End + if realStart < 0 { + realStart = len(arr) + realStart + } + if realEnd < 0 { + realEnd = len(arr) + realEnd + } + if realEnd < realStart { + realEnd, realStart = realStart, realEnd // swap + } + // loop and gather + for idx := realStart; idx < realEnd; idx += f.Step { + if treesArray, ok := node.([]*toml.Tree); ok { + if len(treesArray) > 0 { + ctx.lastPosition = treesArray[0].Position() + } + } + f.next.call(arr[idx], ctx) + } + } +} + +// match anything +type matchAnyFn struct { + matchBase +} + +func newMatchAnyFn() *matchAnyFn 
{ + return &matchAnyFn{} +} + +func (f *matchAnyFn) call(node interface{}, ctx *queryContext) { + if tree, ok := node.(*toml.Tree); ok { + for _, k := range tree.Keys() { + v := tree.Get(k) + ctx.lastPosition = tree.GetPosition(k) + f.next.call(v, ctx) + } + } +} + +// filter through union +type matchUnionFn struct { + Union []pathFn +} + +func (f *matchUnionFn) setNext(next pathFn) { + for _, fn := range f.Union { + fn.setNext(next) + } +} + +func (f *matchUnionFn) call(node interface{}, ctx *queryContext) { + for _, fn := range f.Union { + fn.call(node, ctx) + } +} + +// match every single last node in the tree +type matchRecursiveFn struct { + matchBase +} + +func newMatchRecursiveFn() *matchRecursiveFn { + return &matchRecursiveFn{} +} + +func (f *matchRecursiveFn) call(node interface{}, ctx *queryContext) { + originalPosition := ctx.lastPosition + if tree, ok := node.(*toml.Tree); ok { + var visit func(tree *toml.Tree) + visit = func(tree *toml.Tree) { + for _, k := range tree.Keys() { + v := tree.Get(k) + ctx.lastPosition = tree.GetPosition(k) + f.next.call(v, ctx) + switch node := v.(type) { + case *toml.Tree: + visit(node) + case []*toml.Tree: + for _, subtree := range node { + visit(subtree) + } + } + } + } + ctx.lastPosition = originalPosition + f.next.call(tree, ctx) + visit(tree) + } +} + +// match based on an externally provided functional filter +type matchFilterFn struct { + matchBase + Pos toml.Position + Name string +} + +func newMatchFilterFn(name string, pos toml.Position) *matchFilterFn { + return &matchFilterFn{Name: name, Pos: pos} +} + +func (f *matchFilterFn) call(node interface{}, ctx *queryContext) { + fn, ok := (*ctx.filters)[f.Name] + if !ok { + panic(fmt.Sprintf("%s: query context does not have filter '%s'", + f.Pos.String(), f.Name)) + } + switch castNode := node.(type) { + case *toml.Tree: + for _, k := range castNode.Keys() { + v := castNode.Get(k) + if fn(v) { + ctx.lastPosition = castNode.GetPosition(k) + f.next.call(v, ctx) + } + } + case []*toml.Tree: + for _, v := range castNode { + if fn(v) { + if len(castNode) > 0 { + ctx.lastPosition = castNode[0].Position() + } + f.next.call(v, ctx) + } + } + case []interface{}: + for _, v := range castNode { + if fn(v) { + f.next.call(v, ctx) + } + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/query/parser.go b/vendor/github.com/pelletier/go-toml/query/parser.go new file mode 100644 index 00000000..5f69b70d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/parser.go @@ -0,0 +1,275 @@ +/* + Based on the "jsonpath" spec/concept. + + http://goessner.net/articles/JsonPath/ + https://code.google.com/p/json-path/ +*/ + +package query + +import ( + "fmt" +) + +const maxInt = int(^uint(0) >> 1) + +type queryParser struct { + flow chan token + tokensBuffer []token + query *Query + union []pathFn + err error +} + +type queryParserStateFn func() queryParserStateFn + +// Formats and panics an error message based on a token +func (p *queryParser) parseError(tok *token, msg string, args ...interface{}) queryParserStateFn { + p.err = fmt.Errorf(tok.Position.String()+": "+msg, args...) 
+ return nil // trigger parse to end +} + +func (p *queryParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *queryParser) backup(tok *token) { + p.tokensBuffer = append(p.tokensBuffer, *tok) +} + +func (p *queryParser) peek() *token { + if len(p.tokensBuffer) != 0 { + return &(p.tokensBuffer[0]) + } + + tok, ok := <-p.flow + if !ok { + return nil + } + p.backup(&tok) + return &tok +} + +func (p *queryParser) lookahead(types ...tokenType) bool { + result := true + buffer := []token{} + + for _, typ := range types { + tok := p.getToken() + if tok == nil { + result = false + break + } + buffer = append(buffer, *tok) + if tok.typ != typ { + result = false + break + } + } + // add the tokens back to the buffer, and return + p.tokensBuffer = append(p.tokensBuffer, buffer...) + return result +} + +func (p *queryParser) getToken() *token { + if len(p.tokensBuffer) != 0 { + tok := p.tokensBuffer[0] + p.tokensBuffer = p.tokensBuffer[1:] + return &tok + } + tok, ok := <-p.flow + if !ok { + return nil + } + return &tok +} + +func (p *queryParser) parseStart() queryParserStateFn { + tok := p.getToken() + + if tok == nil || tok.typ == tokenEOF { + return nil + } + + if tok.typ != tokenDollar { + return p.parseError(tok, "Expected '$' at start of expression") + } + + return p.parseMatchExpr +} + +// handle '.' prefix, '[]', and '..' +func (p *queryParser) parseMatchExpr() queryParserStateFn { + tok := p.getToken() + switch tok.typ { + case tokenDotDot: + p.query.appendPath(&matchRecursiveFn{}) + // nested parse for '..' + tok := p.getToken() + switch tok.typ { + case tokenKey: + p.query.appendPath(newMatchKeyFn(tok.val)) + return p.parseMatchExpr + case tokenLeftBracket: + return p.parseBracketExpr + case tokenStar: + // do nothing - the recursive predicate is enough + return p.parseMatchExpr + } + + case tokenDot: + // nested parse for '.' 
+ tok := p.getToken() + switch tok.typ { + case tokenKey: + p.query.appendPath(newMatchKeyFn(tok.val)) + return p.parseMatchExpr + case tokenStar: + p.query.appendPath(&matchAnyFn{}) + return p.parseMatchExpr + } + + case tokenLeftBracket: + return p.parseBracketExpr + + case tokenEOF: + return nil // allow EOF at this stage + } + return p.parseError(tok, "expected match expression") +} + +func (p *queryParser) parseBracketExpr() queryParserStateFn { + if p.lookahead(tokenInteger, tokenColon) { + return p.parseSliceExpr + } + if p.peek().typ == tokenColon { + return p.parseSliceExpr + } + return p.parseUnionExpr +} + +func (p *queryParser) parseUnionExpr() queryParserStateFn { + var tok *token + + // this state can be traversed after some sub-expressions + // so be careful when setting up state in the parser + if p.union == nil { + p.union = []pathFn{} + } + +loop: // labeled loop for easy breaking + for { + if len(p.union) > 0 { + // parse delimiter or terminator + tok = p.getToken() + switch tok.typ { + case tokenComma: + // do nothing + case tokenRightBracket: + break loop + default: + return p.parseError(tok, "expected ',' or ']', not '%s'", tok.val) + } + } + + // parse sub expression + tok = p.getToken() + switch tok.typ { + case tokenInteger: + p.union = append(p.union, newMatchIndexFn(tok.Int())) + case tokenKey: + p.union = append(p.union, newMatchKeyFn(tok.val)) + case tokenString: + p.union = append(p.union, newMatchKeyFn(tok.val)) + case tokenQuestion: + return p.parseFilterExpr + default: + return p.parseError(tok, "expected union sub expression, not '%s', %d", tok.val, len(p.union)) + } + } + + // if there is only one sub-expression, use that instead + if len(p.union) == 1 { + p.query.appendPath(p.union[0]) + } else { + p.query.appendPath(&matchUnionFn{p.union}) + } + + p.union = nil // clear out state + return p.parseMatchExpr +} + +func (p *queryParser) parseSliceExpr() queryParserStateFn { + // init slice to grab all elements + start, end, step := 0, maxInt, 1 + + // parse optional start + tok := p.getToken() + if tok.typ == tokenInteger { + start = tok.Int() + tok = p.getToken() + } + if tok.typ != tokenColon { + return p.parseError(tok, "expected ':'") + } + + // parse optional end + tok = p.getToken() + if tok.typ == tokenInteger { + end = tok.Int() + tok = p.getToken() + } + if tok.typ == tokenRightBracket { + p.query.appendPath(newMatchSliceFn(start, end, step)) + return p.parseMatchExpr + } + if tok.typ != tokenColon { + return p.parseError(tok, "expected ']' or ':'") + } + + // parse optional step + tok = p.getToken() + if tok.typ == tokenInteger { + step = tok.Int() + if step < 0 { + return p.parseError(tok, "step must be a positive value") + } + tok = p.getToken() + } + if tok.typ != tokenRightBracket { + return p.parseError(tok, "expected ']'") + } + + p.query.appendPath(newMatchSliceFn(start, end, step)) + return p.parseMatchExpr +} + +func (p *queryParser) parseFilterExpr() queryParserStateFn { + tok := p.getToken() + if tok.typ != tokenLeftParen { + return p.parseError(tok, "expected left-parenthesis for filter expression") + } + tok = p.getToken() + if tok.typ != tokenKey && tok.typ != tokenString { + return p.parseError(tok, "expected key or string for filter function name") + } + name := tok.val + tok = p.getToken() + if tok.typ != tokenRightParen { + return p.parseError(tok, "expected right-parenthesis for filter expression") + } + p.union = append(p.union, newMatchFilterFn(name, tok.Position)) + return p.parseUnionExpr +} + +func parseQuery(flow chan 
token) (*Query, error) { + parser := &queryParser{ + flow: flow, + tokensBuffer: []token{}, + query: newQuery(), + } + parser.run() + return parser.query, parser.err +} diff --git a/vendor/github.com/pelletier/go-toml/query/query.go b/vendor/github.com/pelletier/go-toml/query/query.go new file mode 100644 index 00000000..1c6cd801 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/query.go @@ -0,0 +1,158 @@ +package query + +import ( + "time" + + "github.com/pelletier/go-toml" +) + +// NodeFilterFn represents a user-defined filter function, for use with +// Query.SetFilter(). +// +// The return value of the function must indicate if 'node' is to be included +// at this stage of the TOML path. Returning true will include the node, and +// returning false will exclude it. +// +// NOTE: Care should be taken to write script callbacks such that they are safe +// to use from multiple goroutines. +type NodeFilterFn func(node interface{}) bool + +// Result is the result of Executing a Query. +type Result struct { + items []interface{} + positions []toml.Position +} + +// appends a value/position pair to the result set. +func (r *Result) appendResult(node interface{}, pos toml.Position) { + r.items = append(r.items, node) + r.positions = append(r.positions, pos) +} + +// Values is a set of values within a Result. The order of values is not +// guaranteed to be in document order, and may be different each time a query is +// executed. +func (r Result) Values() []interface{} { + return r.items +} + +// Positions is a set of positions for values within a Result. Each index +// in Positions() corresponds to the entry in Value() of the same index. +func (r Result) Positions() []toml.Position { + return r.positions +} + +// runtime context for executing query paths +type queryContext struct { + result *Result + filters *map[string]NodeFilterFn + lastPosition toml.Position +} + +// generic path functor interface +type pathFn interface { + setNext(next pathFn) + // it is the caller's responsibility to set the ctx.lastPosition before invoking call() + // node can be one of: *toml.Tree, []*toml.Tree, or a scalar + call(node interface{}, ctx *queryContext) +} + +// A Query is the representation of a compiled TOML path. A Query is safe +// for concurrent use by multiple goroutines. +type Query struct { + root pathFn + tail pathFn + filters *map[string]NodeFilterFn +} + +func newQuery() *Query { + return &Query{ + root: nil, + tail: nil, + filters: &defaultFilterFunctions, + } +} + +func (q *Query) appendPath(next pathFn) { + if q.root == nil { + q.root = next + } else { + q.tail.setNext(next) + } + q.tail = next + next.setNext(newTerminatingFn()) // init the next functor +} + +// Compile compiles a TOML path expression. The returned Query can be used +// to match elements within a Tree and its descendants. See Execute. +func Compile(path string) (*Query, error) { + return parseQuery(lexQuery(path)) +} + +// Execute executes a query against a Tree, and returns the result of the query. +func (q *Query) Execute(tree *toml.Tree) *Result { + result := &Result{ + items: []interface{}{}, + positions: []toml.Position{}, + } + if q.root == nil { + result.appendResult(tree, tree.GetPosition("")) + } else { + ctx := &queryContext{ + result: result, + filters: q.filters, + } + ctx.lastPosition = tree.Position() + q.root.call(tree, ctx) + } + return result +} + +// CompileAndExecute is a shorthand for Compile(path) followed by Execute(tree). 
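+//
+// A minimal usage sketch (illustrative only; the TOML content below is a
+// made-up example, and error handling is abbreviated):
+//
+//   tree, _ := toml.Load("[foo]\nbar = \"baz\"\n")
+//   results, err := query.CompileAndExecute("$.foo.bar", tree)
+//   if err != nil {
+//       // the path expression failed to compile
+//   }
+//   for i, v := range results.Values() {
+//       fmt.Printf("%v: %v\n", results.Positions()[i], v)
+//   }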
+func CompileAndExecute(path string, tree *toml.Tree) (*Result, error) { + query, err := Compile(path) + if err != nil { + return nil, err + } + return query.Execute(tree), nil +} + +// SetFilter sets a user-defined filter function. These may be used inside +// "?(..)" query expressions to filter TOML document elements within a query. +func (q *Query) SetFilter(name string, fn NodeFilterFn) { + if q.filters == &defaultFilterFunctions { + // clone the static table + q.filters = &map[string]NodeFilterFn{} + for k, v := range defaultFilterFunctions { + (*q.filters)[k] = v + } + } + (*q.filters)[name] = fn +} + +var defaultFilterFunctions = map[string]NodeFilterFn{ + "tree": func(node interface{}) bool { + _, ok := node.(*toml.Tree) + return ok + }, + "int": func(node interface{}) bool { + _, ok := node.(int64) + return ok + }, + "float": func(node interface{}) bool { + _, ok := node.(float64) + return ok + }, + "string": func(node interface{}) bool { + _, ok := node.(string) + return ok + }, + "time": func(node interface{}) bool { + _, ok := node.(time.Time) + return ok + }, + "bool": func(node interface{}) bool { + _, ok := node.(bool) + return ok + }, +} diff --git a/vendor/github.com/pelletier/go-toml/query/tokens.go b/vendor/github.com/pelletier/go-toml/query/tokens.go new file mode 100644 index 00000000..9ae579de --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/tokens.go @@ -0,0 +1,106 @@ +package query + +import ( + "fmt" + "github.com/pelletier/go-toml" + "strconv" + "unicode" +) + +// Define tokens +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenKey + tokenString + tokenInteger + tokenFloat + tokenLeftBracket + tokenRightBracket + tokenLeftParen + tokenRightParen + tokenComma + tokenColon + tokenDollar + tokenStar + tokenQuestion + tokenDot + tokenDotDot +) + +var tokenTypeNames = []string{ + "Error", + "EOF", + "Key", + "String", + "Integer", + "Float", + "[", + "]", + "(", + ")", + ",", + ":", + "$", + "*", + "?", + ".", + "..", +} + +type token struct { + toml.Position + typ tokenType + val string +} + +func (tt tokenType) String() string { + idx := int(tt) + if idx < len(tokenTypeNames) { + return tokenTypeNames[idx] + } + return "Unknown" +} + +func (t token) Int() int { + if result, err := strconv.Atoi(t.val); err != nil { + panic(err) + } else { + return result + } +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + case tokenError: + return t.val + } + + return fmt.Sprintf("%q", t.val) +} + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func isAlphanumeric(r rune) bool { + return unicode.IsLetter(r) || r == '_' +} + +func isDigit(r rune) bool { + return unicode.IsNumber(r) +} + +func isHexDigit(r rune) bool { + return isDigit(r) || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go new file mode 100644 index 00000000..1a908134 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -0,0 +1,144 @@ +package toml + +import ( + "fmt" + "strconv" + "unicode" +) + +// Define tokens +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenComment + tokenKey + tokenString + tokenInteger + tokenTrue + tokenFalse + tokenFloat + tokenInf + tokenNan + tokenEqual + tokenLeftBracket + tokenRightBracket + tokenLeftCurlyBrace + tokenRightCurlyBrace + tokenLeftParen + tokenRightParen + 
tokenDoubleLeftBracket + tokenDoubleRightBracket + tokenDate + tokenKeyGroup + tokenKeyGroupArray + tokenComma + tokenColon + tokenDollar + tokenStar + tokenQuestion + tokenDot + tokenDotDot + tokenEOL +) + +var tokenTypeNames = []string{ + "Error", + "EOF", + "Comment", + "Key", + "String", + "Integer", + "True", + "False", + "Float", + "Inf", + "NaN", + "=", + "[", + "]", + "{", + "}", + "(", + ")", + "]]", + "[[", + "Date", + "KeyGroup", + "KeyGroupArray", + ",", + ":", + "$", + "*", + "?", + ".", + "..", + "EOL", +} + +type token struct { + Position + typ tokenType + val string +} + +func (tt tokenType) String() string { + idx := int(tt) + if idx < len(tokenTypeNames) { + return tokenTypeNames[idx] + } + return "Unknown" +} + +func (t token) Int() int { + if result, err := strconv.Atoi(t.val); err != nil { + panic(err) + } else { + return result + } +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + case tokenError: + return t.val + } + + return fmt.Sprintf("%q", t.val) +} + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func isAlphanumeric(r rune) bool { + return unicode.IsLetter(r) || r == '_' +} + +func isKeyChar(r rune) bool { + // Keys start with the first character that isn't whitespace or [ and end + // with the last non-whitespace character before the equals sign. Keys + // cannot contain a # character." + return !(r == '\r' || r == '\n' || r == eof || r == '=') +} + +func isKeyStartChar(r rune) bool { + return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') +} + +func isDigit(r rune) bool { + return unicode.IsNumber(r) +} + +func isHexDigit(r rune) bool { + return isDigit(r) || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go new file mode 100644 index 00000000..05493a44 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -0,0 +1,309 @@ +package toml + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" +) + +type tomlValue struct { + value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list + comment string + commented bool + position Position +} + +// Tree is the result of the parsing of a TOML file. +type Tree struct { + values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree + comment string + commented bool + position Position +} + +func newTree() *Tree { + return &Tree{ + values: make(map[string]interface{}), + position: Position{}, + } +} + +// TreeFromMap initializes a new Tree object using the given map. +func TreeFromMap(m map[string]interface{}) (*Tree, error) { + result, err := toTree(m) + if err != nil { + return nil, err + } + return result.(*Tree), nil +} + +// Position returns the position of the tree. +func (t *Tree) Position() Position { + return t.position +} + +// Has returns a boolean indicating if the given key exists. +func (t *Tree) Has(key string) bool { + if key == "" { + return false + } + return t.HasPath(strings.Split(key, ".")) +} + +// HasPath returns true if the given path of keys exists, false otherwise. +func (t *Tree) HasPath(keys []string) bool { + return t.GetPath(keys) != nil +} + +// Keys returns the keys of the toplevel tree (does not recurse). +func (t *Tree) Keys() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + return keys +} + +// Get the value at key in the Tree. 
+// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// If you need to retrieve non-bare keys, use GetPath. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) Get(key string) interface{} { + if key == "" { + return t + } + return t.GetPath(strings.Split(key, ".")) +} + +// GetPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.value + default: + return node + } +} + +// GetPosition returns the position of the given key. +func (t *Tree) GetPosition(key string) Position { + if key == "" { + return t.position + } + return t.GetPositionPath(strings.Split(key, ".")) +} + +// GetPositionPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetPositionPath(keys []string) Position { + if len(keys) == 0 { + return t.position + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return Position{0, 0} + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + subtree = node[len(node)-1] + default: + return Position{0, 0} + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.position + case *Tree: + return node.position + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + return node[len(node)-1].position + default: + return Position{0, 0} + } +} + +// GetDefault works like Get but with a default value +func (t *Tree) GetDefault(key string, def interface{}) interface{} { + val := t.Get(key) + if val == nil { + return def + } + return val +} + +// Set an element in the tree. +// Key is a dot-separated path (e.g. a.b.c). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) Set(key string, value interface{}) { + t.SetWithComment(key, "", false, value) +} + +// SetWithComment is the same as Set, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { + t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) +} + +// SetPath sets an element in the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) SetPath(keys []string, value interface{}) { + t.SetPathWithComment(keys, "", false, value) +} + +// SetPathWithComment is the same as SetPath, but allows you to provide comment +// information to the key, that will be reused by Marshal(). 
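+//
+// A short illustrative sketch (the key names are made up):
+//
+//   tree, _ := toml.TreeFromMap(map[string]interface{}{})
+//   tree.SetPathWithComment([]string{"server", "port"}, "listen port", false, int64(8080))
+//   // equivalent to the above, but without comment metadata:
+//   tree.SetPath([]string{"server", "port"}, int64(8080))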
+func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + nextTree = newTree() + subtree.values[intermediateKey] = nextTree // add new element here + } + switch node := nextTree.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + // create element if it does not exist + subtree.values[intermediateKey] = append(node, newTree()) + } + subtree = node[len(node)-1] + } + } + + var toInsert interface{} + + switch value.(type) { + case *Tree: + tt := value.(*Tree) + tt.comment = comment + toInsert = value + case []*Tree: + toInsert = value + case *tomlValue: + tt := value.(*tomlValue) + tt.comment = comment + toInsert = tt + default: + toInsert = &tomlValue{value: value, comment: comment, commented: commented} + } + + subtree.values[keys[len(keys)-1]] = toInsert +} + +// createSubTree takes a tree and a key and create the necessary intermediate +// subtrees to create a subtree at that point. In-place. +// +// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] +// and tree[a][b][c] +// +// Returns nil on success, error object on failure +func (t *Tree) createSubTree(keys []string, pos Position) error { + subtree := t + for _, intermediateKey := range keys { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + tree := newTree() + tree.position = pos + subtree.values[intermediateKey] = tree + nextTree = tree + } + + switch node := nextTree.(type) { + case []*Tree: + subtree = node[len(node)-1] + case *Tree: + subtree = node + default: + return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", + strings.Join(keys, "."), intermediateKey, nextTree, nextTree) + } + } + return nil +} + +// LoadBytes creates a Tree from a []byte. +func LoadBytes(b []byte) (tree *Tree, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = errors.New(r.(string)) + } + }() + tree = parseToml(lexToml(b)) + return +} + +// LoadReader creates a Tree from any io.Reader. +func LoadReader(reader io.Reader) (tree *Tree, err error) { + inputBytes, err := ioutil.ReadAll(reader) + if err != nil { + return + } + tree, err = LoadBytes(inputBytes) + return +} + +// Load creates a Tree from a string. +func Load(content string) (tree *Tree, err error) { + return LoadBytes([]byte(content)) +} + +// LoadFile creates a Tree from a file. 
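+//
+// A minimal usage sketch (the file name is hypothetical):
+//
+//   tree, err := toml.LoadFile("config.toml")
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   port := tree.GetDefault("server.port", int64(8080)).(int64)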
+func LoadFile(path string) (tree *Tree, err error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return LoadReader(file) +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go new file mode 100644 index 00000000..79610e9b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -0,0 +1,142 @@ +package toml + +import ( + "fmt" + "reflect" + "time" +) + +var kindToType = [reflect.String + 1]reflect.Type{ + reflect.Bool: reflect.TypeOf(true), + reflect.String: reflect.TypeOf(""), + reflect.Float32: reflect.TypeOf(float64(1)), + reflect.Float64: reflect.TypeOf(float64(1)), + reflect.Int: reflect.TypeOf(int64(1)), + reflect.Int8: reflect.TypeOf(int64(1)), + reflect.Int16: reflect.TypeOf(int64(1)), + reflect.Int32: reflect.TypeOf(int64(1)), + reflect.Int64: reflect.TypeOf(int64(1)), + reflect.Uint: reflect.TypeOf(uint64(1)), + reflect.Uint8: reflect.TypeOf(uint64(1)), + reflect.Uint16: reflect.TypeOf(uint64(1)), + reflect.Uint32: reflect.TypeOf(uint64(1)), + reflect.Uint64: reflect.TypeOf(uint64(1)), +} + +// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. +// supported values: +// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 +func typeFor(k reflect.Kind) reflect.Type { + if k > 0 && int(k) < len(kindToType) { + return kindToType[k] + } + return nil +} + +func simpleValueCoercion(object interface{}) (interface{}, error) { + switch original := object.(type) { + case string, bool, int64, uint64, float64, time.Time: + return original, nil + case int: + return int64(original), nil + case int8: + return int64(original), nil + case int16: + return int64(original), nil + case int32: + return int64(original), nil + case uint: + return uint64(original), nil + case uint8: + return uint64(original), nil + case uint16: + return uint64(original), nil + case uint32: + return uint64(original), nil + case float32: + return float64(original), nil + case fmt.Stringer: + return original.String(), nil + default: + return nil, fmt.Errorf("cannot convert type %T to Tree", object) + } +} + +func sliceToTree(object interface{}) (interface{}, error) { + // arrays are a bit tricky, since they can represent either a + // collection of simple values, which is represented by one + // *tomlValue, or an array of tables, which is represented by an + // array of *Tree. 
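+	// For example, []int64{1, 2} becomes a single *tomlValue holding the
+	// whole slice, while []map[string]interface{}{...} becomes a []*Tree.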
+ + // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice + value := reflect.ValueOf(object) + insideType := value.Type().Elem() + length := value.Len() + if length > 0 { + insideType = reflect.ValueOf(value.Index(0).Interface()).Type() + } + if insideType.Kind() == reflect.Map { + // this is considered as an array of tables + tablesArray := make([]*Tree, 0, length) + for i := 0; i < length; i++ { + table := value.Index(i) + tree, err := toTree(table.Interface()) + if err != nil { + return nil, err + } + tablesArray = append(tablesArray, tree.(*Tree)) + } + return tablesArray, nil + } + + sliceType := typeFor(insideType.Kind()) + if sliceType == nil { + sliceType = insideType + } + + arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) + + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil +} + +func toTree(object interface{}) (interface{}, error) { + value := reflect.ValueOf(object) + + if value.Kind() == reflect.Map { + values := map[string]interface{}{} + keys := value.MapKeys() + for _, key := range keys { + if key.Kind() != reflect.String { + if _, ok := key.Interface().(string); !ok { + return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) + } + } + + v := value.MapIndex(key) + newValue, err := toTree(v.Interface()) + if err != nil { + return nil, err + } + values[key.String()] = newValue + } + return &Tree{values: values, position: Position{}}, nil + } + + if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { + return sliceToTree(object) + } + + simpleValue, err := simpleValueCoercion(object) + if err != nil { + return nil, err + } + return &tomlValue{value: simpleValue, position: Position{}}, nil +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go new file mode 100644 index 00000000..b5600a58 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -0,0 +1,287 @@ +package toml + +import ( + "bytes" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// encodes a string to a TOML-compliant string value +func encodeTomlString(value string) string { + var b bytes.Buffer + + for _, rr := range value { + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString(`\t`) + case '\n': + b.WriteString(`\n`) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString(`\r`) + case '"': + b.WriteString(`\"`) + case '\\': + b.WriteString(`\\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) { + switch value := v.(type) { + case uint64: + return strconv.FormatUint(value, 10), nil + case int64: + return strconv.FormatInt(value, 10), nil + case float64: + // Ensure a round float does contain a decimal point. Otherwise feeding + // the output back to the parser would convert to an integer. 
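+		// e.g. float64(2) must render as "2.0", not "2", so that it
+		// round-trips as a float.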
+ if math.Trunc(value) == value { + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil + } + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil + case string: + return "\"" + encodeTomlString(value) + "\"", nil + case []byte: + b, _ := v.([]byte) + return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine) + case bool: + if value { + return "true", nil + } + return "false", nil + case time.Time: + return value.Format(time.RFC3339), nil + case nil: + return "", nil + } + + rv := reflect.ValueOf(v) + + if rv.Kind() == reflect.Slice { + var values []string + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine) + if err != nil { + return "", err + } + values = append(values, itemRepr) + } + if arraysOneElementPerLine && len(values) > 1 { + stringBuffer := bytes.Buffer{} + valueIndent := indent + ` ` // TODO: move that to a shared encoder state + + stringBuffer.WriteString("[\n") + + for _, value := range values { + stringBuffer.WriteString(valueIndent) + stringBuffer.WriteString(value) + stringBuffer.WriteString(`,`) + stringBuffer.WriteString("\n") + } + + stringBuffer.WriteString(indent + "]") + + return stringBuffer.String(), nil + } + return "[" + strings.Join(values, ",") + "]", nil + } + return "", fmt.Errorf("unsupported value type %T: %v", v, v) +} + +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + simpleValuesKeys := make([]string, 0) + complexValuesKeys := make([]string, 0) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree, []*Tree: + complexValuesKeys = append(complexValuesKeys, k) + default: + simpleValuesKeys = append(simpleValuesKeys, k) + } + } + + sort.Strings(simpleValuesKeys) + sort.Strings(complexValuesKeys) + + for _, k := range simpleValuesKeys { + v, ok := t.values[k].(*tomlValue) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + + repr, err := tomlValueStringRepresentation(v.value, indent, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + var commented string + if v.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + } + + for _, k := range complexValuesKeys { + v := t.values[k] + + combinedKey := k + if keyspace != "" { + combinedKey = keyspace + "." 
+ combinedKey + } + var commented string + if t.commented { + commented = "# " + } + + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + + bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + } + } + } + + return bytesCount, nil +} + +func writeStrings(w io.Writer, s ...string) (int, error) { + var n int + for i := range s { + b, err := io.WriteString(w, s[i]) + n += b + if err != nil { + return n, err + } + } + return n, nil +} + +// WriteTo encode the Tree as Toml and writes it to the writer w. +// Returns the number of bytes written in case of success, or an error if anything happened. +func (t *Tree) WriteTo(w io.Writer) (int64, error) { + return t.writeTo(w, "", "", 0, false) +} + +// ToTomlString generates a human-readable representation of the current tree. +// Output spans multiple lines, and is suitable for ingest by a TOML parser. +// If the conversion cannot be performed, ToString returns a non-nil error. +func (t *Tree) ToTomlString() (string, error) { + var buf bytes.Buffer + _, err := t.WriteTo(&buf) + if err != nil { + return "", err + } + return buf.String(), nil +} + +// String generates a human-readable representation of the current tree. +// Alias of ToString. Present to implement the fmt.Stringer interface. +func (t *Tree) String() string { + result, _ := t.ToTomlString() + return result +} + +// ToMap recursively generates a representation of the tree using Go built-in structures. +// The following types are used: +// +// * bool +// * float64 +// * int64 +// * string +// * uint64 +// * time.Time +// * map[string]interface{} (where interface{} is any of this list) +// * []interface{} (where interface{} is any of this list) +func (t *Tree) ToMap() map[string]interface{} { + result := map[string]interface{}{} + + for k, v := range t.values { + switch node := v.(type) { + case []*Tree: + var array []interface{} + for _, item := range node { + array = append(array, item.ToMap()) + } + result[k] = array + case *Tree: + result[k] = node.ToMap() + case *tomlValue: + result[k] = node.value + } + } + return result +} diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 00000000..835ba3e7 --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 00000000..842ee804 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,269 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required the errors.WithStack and errors.WithMessage +// functions destructure errors.Wrap into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. 
The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. This information can be retrieved with the following interface. +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. 
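+//
+// An illustrative sketch (the file name and message are hypothetical):
+//
+//   f, err := os.Open("config.json")
+//   if err != nil {
+//       return errors.Wrapf(err, "opening %s failed", "config.json")
+//   }
+//   defer f.Close()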
+func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 00000000..2874a048 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,147 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s function name and path of source file relative to the compile time +// GOPATH separated by \n\t (\n\t) +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). 
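+//
+// A sketch of recovering the trace from an error created by this package
+// (illustrative only):
+//
+//   err := errors.New("boom")
+//   if st, ok := err.(interface{ StackTrace() StackTrace }); ok {
+//       fmt.Printf("%+v\n", st.StackTrace())
+//   }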
+type StackTrace []Frame + +// Format formats the stack of Frames according to the fmt.Formatter interface. +// +// %s lists source files for each Frame in the stack +// %v lists the source file and line number for each Frame in the stack +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+v Prints filename, function, and line number for each Frame in the stack. +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + fmt.Fprintf(s, "\n%+v", f) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + fmt.Fprintf(s, "%v", []Frame(st)) + } + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} diff --git a/vendor/github.com/pkg/sftp/LICENSE b/vendor/github.com/pkg/sftp/LICENSE new file mode 100644 index 00000000..b7b53921 --- /dev/null +++ b/vendor/github.com/pkg/sftp/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2013, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go new file mode 100644 index 00000000..18412e33 --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs.go @@ -0,0 +1,241 @@ +package sftp + +// ssh_FXP_ATTRS support +// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 + +import ( + "os" + "syscall" + "time" +) + +const ( + ssh_FILEXFER_ATTR_SIZE = 0x00000001 + ssh_FILEXFER_ATTR_UIDGID = 0x00000002 + ssh_FILEXFER_ATTR_PERMISSIONS = 0x00000004 + ssh_FILEXFER_ATTR_ACMODTIME = 0x00000008 + ssh_FILEXFER_ATTR_EXTENDED = 0x80000000 +) + +// fileInfo is an artificial type designed to satisfy os.FileInfo. +type fileInfo struct { + name string + size int64 + mode os.FileMode + mtime time.Time + sys interface{} +} + +// Name returns the base name of the file. +func (fi *fileInfo) Name() string { return fi.name } + +// Size returns the length in bytes for regular files; system-dependent for others. +func (fi *fileInfo) Size() int64 { return fi.size } + +// Mode returns file mode bits. +func (fi *fileInfo) Mode() os.FileMode { return fi.mode } + +// ModTime returns the last modification time of the file. +func (fi *fileInfo) ModTime() time.Time { return fi.mtime } + +// IsDir returns true if the file is a directory. +func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() } + +func (fi *fileInfo) Sys() interface{} { return fi.sys } + +// FileStat holds the original unmarshalled values from a call to READDIR or *STAT. +// It is exported for the purposes of accessing the raw values via os.FileInfo.Sys() +type FileStat struct { + Size uint64 + Mode uint32 + Mtime uint32 + Atime uint32 + UID uint32 + GID uint32 + Extended []StatExtended +} + +// StatExtended contains additional, extended information for a FileStat. 
+type StatExtended struct { + ExtType string + ExtData string +} + +func fileInfoFromStat(st *FileStat, name string) os.FileInfo { + fs := &fileInfo{ + name: name, + size: int64(st.Size), + mode: toFileMode(st.Mode), + mtime: time.Unix(int64(st.Mtime), 0), + sys: st, + } + return fs +} + +func fileStatFromInfo(fi os.FileInfo) (uint32, FileStat) { + mtime := fi.ModTime().Unix() + atime := mtime + var flags uint32 = ssh_FILEXFER_ATTR_SIZE | + ssh_FILEXFER_ATTR_PERMISSIONS | + ssh_FILEXFER_ATTR_ACMODTIME + + fileStat := FileStat{ + Size: uint64(fi.Size()), + Mode: fromFileMode(fi.Mode()), + Mtime: uint32(mtime), + Atime: uint32(atime), + } + + // os specific file stat decoding + fileStatFromInfoOs(fi, &flags, &fileStat) + + return flags, fileStat +} + +func unmarshalAttrs(b []byte) (*FileStat, []byte) { + flags, b := unmarshalUint32(b) + return getFileStat(flags, b) +} + +func getFileStat(flags uint32, b []byte) (*FileStat, []byte) { + var fs FileStat + if flags&ssh_FILEXFER_ATTR_SIZE == ssh_FILEXFER_ATTR_SIZE { + fs.Size, b = unmarshalUint64(b) + } + if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { + fs.UID, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { + fs.GID, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_PERMISSIONS == ssh_FILEXFER_ATTR_PERMISSIONS { + fs.Mode, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_ACMODTIME == ssh_FILEXFER_ATTR_ACMODTIME { + fs.Atime, b = unmarshalUint32(b) + fs.Mtime, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_EXTENDED == ssh_FILEXFER_ATTR_EXTENDED { + var count uint32 + count, b = unmarshalUint32(b) + ext := make([]StatExtended, count) + for i := uint32(0); i < count; i++ { + var typ string + var data string + typ, b = unmarshalString(b) + data, b = unmarshalString(b) + ext[i] = StatExtended{typ, data} + } + fs.Extended = ext + } + return &fs, b +} + +func marshalFileInfo(b []byte, fi os.FileInfo) []byte { + // attributes variable struct, and also variable per protocol version + // spec version 3 attributes: + // uint32 flags + // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE + // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID + // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID + // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS + // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME + // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME + // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED + // string extended_type + // string extended_data + // ... 
more extended data (extended_type - extended_data pairs), + // so that number of pairs equals extended_count + + flags, fileStat := fileStatFromInfo(fi) + + b = marshalUint32(b, flags) + if flags&ssh_FILEXFER_ATTR_SIZE != 0 { + b = marshalUint64(b, fileStat.Size) + } + if flags&ssh_FILEXFER_ATTR_UIDGID != 0 { + b = marshalUint32(b, fileStat.UID) + b = marshalUint32(b, fileStat.GID) + } + if flags&ssh_FILEXFER_ATTR_PERMISSIONS != 0 { + b = marshalUint32(b, fileStat.Mode) + } + if flags&ssh_FILEXFER_ATTR_ACMODTIME != 0 { + b = marshalUint32(b, fileStat.Atime) + b = marshalUint32(b, fileStat.Mtime) + } + + return b +} + +// toFileMode converts sftp filemode bits to the os.FileMode specification +func toFileMode(mode uint32) os.FileMode { + var fm = os.FileMode(mode & 0777) + switch mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fm |= os.ModeDevice + case syscall.S_IFCHR: + fm |= os.ModeDevice | os.ModeCharDevice + case syscall.S_IFDIR: + fm |= os.ModeDir + case syscall.S_IFIFO: + fm |= os.ModeNamedPipe + case syscall.S_IFLNK: + fm |= os.ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fm |= os.ModeSocket + } + if mode&syscall.S_ISGID != 0 { + fm |= os.ModeSetgid + } + if mode&syscall.S_ISUID != 0 { + fm |= os.ModeSetuid + } + if mode&syscall.S_ISVTX != 0 { + fm |= os.ModeSticky + } + return fm +} + +// fromFileMode converts from the os.FileMode specification to sftp filemode bits +func fromFileMode(mode os.FileMode) uint32 { + ret := uint32(0) + + if mode&os.ModeDevice != 0 { + if mode&os.ModeCharDevice != 0 { + ret |= syscall.S_IFCHR + } else { + ret |= syscall.S_IFBLK + } + } + if mode&os.ModeDir != 0 { + ret |= syscall.S_IFDIR + } + if mode&os.ModeSymlink != 0 { + ret |= syscall.S_IFLNK + } + if mode&os.ModeNamedPipe != 0 { + ret |= syscall.S_IFIFO + } + if mode&os.ModeSetgid != 0 { + ret |= syscall.S_ISGID + } + if mode&os.ModeSetuid != 0 { + ret |= syscall.S_ISUID + } + if mode&os.ModeSticky != 0 { + ret |= syscall.S_ISVTX + } + if mode&os.ModeSocket != 0 { + ret |= syscall.S_IFSOCK + } + + if mode&os.ModeType == 0 { + ret |= syscall.S_IFREG + } + ret |= uint32(mode & os.ModePerm) + + return ret +} diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go new file mode 100644 index 00000000..81cf3eac --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs_stubs.go @@ -0,0 +1,11 @@ +// +build !cgo,!plan9 windows android + +package sftp + +import ( + "os" +) + +func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { + // todo +} diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go new file mode 100644 index 00000000..ab6ecdea --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs_unix.go @@ -0,0 +1,17 @@ +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris +// +build cgo + +package sftp + +import ( + "os" + "syscall" +) + +func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { + if statt, ok := fi.Sys().(*syscall.Stat_t); ok { + *flags |= ssh_FILEXFER_ATTR_UIDGID + fileStat.UID = statt.Uid + fileStat.GID = statt.Gid + } +} diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go new file mode 100644 index 00000000..c90d0b23 --- /dev/null +++ b/vendor/github.com/pkg/sftp/client.go @@ -0,0 +1,1174 @@ +package sftp + +import ( + "bytes" + "encoding/binary" + "io" + "os" + "path" + "sync/atomic" + "time" + + "github.com/kr/fs" + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" +) + 
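Editor's note: the attrs.go helpers above (toFileMode, fromFileMode and marshalFileInfo) translate between the draft-filexfer attribute encoding and Go's os.FileMode before the client code below puts them on the wire. A standalone sketch of that mapping for a regular file, using only the standard library (illustrative; it does not call the unexported helpers):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// SFTP carries the Unix-style mode word: type bits OR'd with permission
	// bits, e.g. a regular file with rw-r--r--.
	sftpMode := uint32(syscall.S_IFREG | 0644)

	// Converting to os.FileMode keeps the permission bits; the type bits map
	// onto os.Mode* flags (none are set for a regular file).
	goMode := os.FileMode(sftpMode & 0777)
	if sftpMode&syscall.S_IFMT == syscall.S_IFDIR {
		goMode |= os.ModeDir
	}

	fmt.Printf("sftp mode %o -> os.FileMode %v\n", sftpMode, goMode)
}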
+// InternalInconsistency indicates the packets sent and the data queued to be +// written to the file don't match up. It is an unusual error and usually is +// caused by bad behavior server side or connection issues. The error is +// limited in scope to the call where it happened, the client object is still +// OK to use as long as the connection is still open. +var InternalInconsistency = errors.New("internal inconsistency") + +// A ClientOption is a function which applies configuration to a Client. +type ClientOption func(*Client) error + +// This is based on Openssh's max accepted size of 1<<18 - overhead +const maxMaxPacket = (1 << 18) - 1024 + +// MaxPacket sets the maximum size of the payload. The size param must be +// between 32768 (1<<15) and 261120 ((1 << 18) - 1024). The minimum size is +// given by the RFC, while the maximum size is a de-facto standard based on +// Openssh's SFTP server which won't accept packets much larger than that. +// +// Note if you aren't using Openssh's sftp server and get the error "failed to +// send packet header: EOF" when copying a large file try lowering this number. +func MaxPacket(size int) ClientOption { + return func(c *Client) error { + if size < 1<<15 { + return errors.Errorf("size must be greater or equal to 32k") + } + if size > maxMaxPacket { + return errors.Errorf("max packet size is too large (see docs)") + } + c.maxPacket = size + return nil + } +} + +// NewClient creates a new SFTP client on conn, using zero or more option +// functions. +func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) { + s, err := conn.NewSession() + if err != nil { + return nil, err + } + if err := s.RequestSubsystem("sftp"); err != nil { + return nil, err + } + pw, err := s.StdinPipe() + if err != nil { + return nil, err + } + pr, err := s.StdoutPipe() + if err != nil { + return nil, err + } + + return NewClientPipe(pr, pw, opts...) +} + +// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser. +// This can be used for connecting to an SFTP server over TCP/TLS or by using +// the system's ssh client program (e.g. via exec.Command). +func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) { + sftp := &Client{ + clientConn: clientConn{ + conn: conn{ + Reader: rd, + WriteCloser: wr, + }, + inflight: make(map[uint32]chan<- result), + }, + maxPacket: 1 << 15, + } + if err := sftp.applyOptions(opts...); err != nil { + wr.Close() + return nil, err + } + if err := sftp.sendInit(); err != nil { + wr.Close() + return nil, err + } + if err := sftp.recvVersion(); err != nil { + wr.Close() + return nil, err + } + sftp.clientConn.wg.Add(1) + go sftp.loop() + return sftp, nil +} + +// Client represents an SFTP session on a *ssh.ClientConn SSH connection. +// Multiple Clients can be active on a single SSH connection, and a Client +// may be called concurrently from multiple Goroutines. +// +// Client implements the github.com/kr/fs.FileSystem interface. +type Client struct { + clientConn + + maxPacket int // max packet size read or written. + nextid uint32 +} + +// Create creates the named file mode 0666 (before umask), truncating it if it +// already exists. If successful, methods on the returned File can be used for +// I/O; the associated file descriptor has mode O_RDWR. If you need more +// control over the flags/mode used to open the file see client.OpenFile. 
+func (c *Client) Create(path string) (*File, error) { + return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) +} + +const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 + +func (c *Client) sendInit() error { + return c.clientConn.conn.sendPacket(sshFxInitPacket{ + Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 + }) +} + +// returns the next value of c.nextid +func (c *Client) nextID() uint32 { + return atomic.AddUint32(&c.nextid, 1) +} + +func (c *Client) recvVersion() error { + typ, data, err := c.recvPacket() + if err != nil { + return err + } + if typ != ssh_FXP_VERSION { + return &unexpectedPacketErr{ssh_FXP_VERSION, typ} + } + + version, _ := unmarshalUint32(data) + if version != sftpProtocolVersion { + return &unexpectedVersionErr{sftpProtocolVersion, version} + } + + return nil +} + +// Walk returns a new Walker rooted at root. +func (c *Client) Walk(root string) *fs.Walker { + return fs.WalkFS(root, c) +} + +// ReadDir reads the directory named by dirname and returns a list of +// directory entries. +func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { + handle, err := c.opendir(p) + if err != nil { + return nil, err + } + defer c.close(handle) // this has to defer earlier than the lock below + var attrs []os.FileInfo + var done = false + for !done { + id := c.nextID() + typ, data, err1 := c.sendPacket(sshFxpReaddirPacket{ + ID: id, + Handle: handle, + }) + if err1 != nil { + err = err1 + done = true + break + } + switch typ { + case ssh_FXP_NAME: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + for i := uint32(0); i < count; i++ { + var filename string + filename, data = unmarshalString(data) + _, data = unmarshalString(data) // discard longname + var attr *FileStat + attr, data = unmarshalAttrs(data) + if filename == "." || filename == ".." { + continue + } + attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename))) + } + case ssh_FXP_STATUS: + // TODO(dfc) scope warning! + err = normaliseError(unmarshalStatus(id, data)) + done = true + default: + return nil, unimplementedPacketErr(typ) + } + } + if err == io.EOF { + err = nil + } + return attrs, err +} + +func (c *Client) opendir(path string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpOpendirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return "", err + } + switch typ { + case ssh_FXP_HANDLE: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + handle, _ := unmarshalString(data) + return handle, nil + case ssh_FXP_STATUS: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Stat returns a FileInfo structure describing the file specified by path 'p'. +// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file. 
+func (c *Client) Stat(p string) (os.FileInfo, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpStatPacket{ + ID: id, + Path: p, + }) + if err != nil { + return nil, err + } + switch typ { + case ssh_FXP_ATTRS: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return fileInfoFromStat(attr, path.Base(p)), nil + case ssh_FXP_STATUS: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// Lstat returns a FileInfo structure describing the file specified by path 'p'. +// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link. +func (c *Client) Lstat(p string) (os.FileInfo, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpLstatPacket{ + ID: id, + Path: p, + }) + if err != nil { + return nil, err + } + switch typ { + case ssh_FXP_ATTRS: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return fileInfoFromStat(attr, path.Base(p)), nil + case ssh_FXP_STATUS: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// ReadLink reads the target of a symbolic link. +func (c *Client) ReadLink(p string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpReadlinkPacket{ + ID: id, + Path: p, + }) + if err != nil { + return "", err + } + switch typ { + case ssh_FXP_NAME: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + if count != 1 { + return "", unexpectedCount(1, count) + } + filename, _ := unmarshalString(data) // ignore dummy attributes + return filename, nil + case ssh_FXP_STATUS: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Symlink creates a symbolic link at 'newname', pointing at target 'oldname' +func (c *Client) Symlink(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpSymlinkPacket{ + ID: id, + Linkpath: newname, + Targetpath: oldname, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// setstat is a convience wrapper to allow for changing of various parts of the file descriptor. +func (c *Client) setstat(path string, flags uint32, attrs interface{}) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpSetstatPacket{ + ID: id, + Path: path, + Flags: flags, + Attrs: attrs, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Chtimes changes the access and modification times of the named file. +func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error { + type times struct { + Atime uint32 + Mtime uint32 + } + attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())} + return c.setstat(path, ssh_FILEXFER_ATTR_ACMODTIME, attrs) +} + +// Chown changes the user and group owners of the named file. 
+func (c *Client) Chown(path string, uid, gid int) error { + type owner struct { + UID uint32 + GID uint32 + } + attrs := owner{uint32(uid), uint32(gid)} + return c.setstat(path, ssh_FILEXFER_ATTR_UIDGID, attrs) +} + +// Chmod changes the permissions of the named file. +func (c *Client) Chmod(path string, mode os.FileMode) error { + return c.setstat(path, ssh_FILEXFER_ATTR_PERMISSIONS, uint32(mode)) +} + +// Truncate sets the size of the named file. Although it may be safely assumed +// that if the size is less than its current size it will be truncated to fit, +// the SFTP protocol does not specify what behavior the server should do when setting +// size greater than the current size. +func (c *Client) Truncate(path string, size int64) error { + return c.setstat(path, ssh_FILEXFER_ATTR_SIZE, uint64(size)) +} + +// Open opens the named file for reading. If successful, methods on the +// returned file can be used for reading; the associated file descriptor +// has mode O_RDONLY. +func (c *Client) Open(path string) (*File, error) { + return c.open(path, flags(os.O_RDONLY)) +} + +// OpenFile is the generalized open call; most users will use Open or +// Create instead. It opens the named file with specified flag (O_RDONLY +// etc.). If successful, methods on the returned File can be used for I/O. +func (c *Client) OpenFile(path string, f int) (*File, error) { + return c.open(path, flags(f)) +} + +func (c *Client) open(path string, pflags uint32) (*File, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpOpenPacket{ + ID: id, + Path: path, + Pflags: pflags, + }) + if err != nil { + return nil, err + } + switch typ { + case ssh_FXP_HANDLE: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + handle, _ := unmarshalString(data) + return &File{c: c, path: path, handle: handle}, nil + case ssh_FXP_STATUS: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// close closes a handle handle previously returned in the response +// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid +// immediately after this request has been sent. +func (c *Client) close(handle string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpClosePacket{ + ID: id, + Handle: handle, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +func (c *Client) fstat(handle string) (*FileStat, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpFstatPacket{ + ID: id, + Handle: handle, + }) + if err != nil { + return nil, err + } + switch typ { + case ssh_FXP_ATTRS: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return attr, nil + case ssh_FXP_STATUS: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// StatVFS retrieves VFS statistics from a remote host. +// +// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature +// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt. 
+func (c *Client) StatVFS(path string) (*StatVFS, error) { + // send the StatVFS packet to the server + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpStatvfsPacket{ + ID: id, + Path: path, + }) + if err != nil { + return nil, err + } + + switch typ { + // server responded with valid data + case ssh_FXP_EXTENDED_REPLY: + var response StatVFS + err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response) + if err != nil { + return nil, errors.New("can not parse reply") + } + + return &response, nil + + // the resquest failed + case ssh_FXP_STATUS: + return nil, errors.New(fxp(ssh_FXP_STATUS).String()) + + default: + return nil, unimplementedPacketErr(typ) + } +} + +// Join joins any number of path elements into a single path, adding a +// separating slash if necessary. The result is Cleaned; in particular, all +// empty strings are ignored. +func (c *Client) Join(elem ...string) string { return path.Join(elem...) } + +// Remove removes the specified file or directory. An error will be returned if no +// file or directory with the specified path exists, or if the specified directory +// is not empty. +func (c *Client) Remove(path string) error { + err := c.removeFile(path) + if err, ok := err.(*StatusError); ok { + switch err.Code { + // some servers, *cough* osx *cough*, return EPERM, not ENODIR. + // serv-u returns ssh_FX_FILE_IS_A_DIRECTORY + case ssh_FX_PERMISSION_DENIED, ssh_FX_FAILURE, ssh_FX_FILE_IS_A_DIRECTORY: + return c.RemoveDirectory(path) + } + } + return err +} + +func (c *Client) removeFile(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRemovePacket{ + ID: id, + Filename: path, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// RemoveDirectory removes a directory path. +func (c *Client) RemoveDirectory(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRmdirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Rename renames a file. +func (c *Client) Rename(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRenamePacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// PosixRename renames a file using the posix-rename@openssh.com extension +// which will replace newname if it already exists. 
+func (c *Client) PosixRename(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpPosixRenamePacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +func (c *Client) realpath(path string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRealpathPacket{ + ID: id, + Path: path, + }) + if err != nil { + return "", err + } + switch typ { + case ssh_FXP_NAME: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + if count != 1 { + return "", unexpectedCount(1, count) + } + filename, _ := unmarshalString(data) // ignore attributes + return filename, nil + case ssh_FXP_STATUS: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Getwd returns the current working directory of the server. Operations +// involving relative paths will be based at this location. +func (c *Client) Getwd() (string, error) { + return c.realpath(".") +} + +// Mkdir creates the specified directory. An error will be returned if a file or +// directory with the specified path already exists, or if the directory's +// parent folder does not exist (the method cannot create complete paths). +func (c *Client) Mkdir(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpMkdirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// applyOptions applies options functions to the Client. +// If an error is encountered, option processing ceases. +func (c *Client) applyOptions(opts ...ClientOption) error { + for _, f := range opts { + if err := f(c); err != nil { + return err + } + } + return nil +} + +// File represents a remote file. +type File struct { + c *Client + path string + handle string + offset uint64 // current offset within remote file +} + +// Close closes the File, rendering it unusable for I/O. It returns an +// error, if any. +func (f *File) Close() error { + return f.c.close(f.handle) +} + +// Name returns the name of the file as presented to Open or Create. +func (f *File) Name() string { + return f.path +} + +const maxConcurrentRequests = 64 + +// Read reads up to len(b) bytes from the File. It returns the number of bytes +// read and an error, if any. Read follows io.Reader semantics, so when Read +// encounters an error or EOF condition after successfully reading n > 0 bytes, +// it returns the number of bytes read. +func (f *File) Read(b []byte) (int, error) { + // Split the read into multiple maxPacket sized concurrent reads + // bounded by maxConcurrentRequests. This allows reads with a suitably + // large buffer to transfer data at a much faster rate due to + // overlapping round trip times. 
+ inFlight := 0 + desiredInFlight := 1 + offset := f.offset + // maxConcurrentRequests buffer to deal with broadcastErr() floods + // also must have a buffer of max value of (desiredInFlight - inFlight) + ch := make(chan result, maxConcurrentRequests) + type inflightRead struct { + b []byte + offset uint64 + } + reqs := map[uint32]inflightRead{} + type offsetErr struct { + offset uint64 + err error + } + var firstErr offsetErr + + sendReq := func(b []byte, offset uint64) { + reqID := f.c.nextID() + f.c.dispatchRequest(ch, sshFxpReadPacket{ + ID: reqID, + Handle: f.handle, + Offset: offset, + Len: uint32(len(b)), + }) + inFlight++ + reqs[reqID] = inflightRead{b: b, offset: offset} + } + + var read int + for len(b) > 0 || inFlight > 0 { + for inFlight < desiredInFlight && len(b) > 0 && firstErr.err == nil { + l := min(len(b), f.c.maxPacket) + rb := b[:l] + sendReq(rb, offset) + offset += uint64(l) + b = b[l:] + } + + if inFlight == 0 { + break + } + res := <-ch + inFlight-- + if res.err != nil { + firstErr = offsetErr{offset: 0, err: res.err} + continue + } + reqID, data := unmarshalUint32(res.data) + req, ok := reqs[reqID] + if !ok { + firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)} + continue + } + delete(reqs, reqID) + switch res.typ { + case ssh_FXP_STATUS: + if firstErr.err == nil || req.offset < firstErr.offset { + firstErr = offsetErr{ + offset: req.offset, + err: normaliseError(unmarshalStatus(reqID, res.data)), + } + } + case ssh_FXP_DATA: + l, data := unmarshalUint32(data) + n := copy(req.b, data[:l]) + read += n + if n < len(req.b) { + sendReq(req.b[l:], req.offset+uint64(l)) + } + if desiredInFlight < maxConcurrentRequests { + desiredInFlight++ + } + default: + firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)} + } + } + // If the error is anything other than EOF, then there + // may be gaps in the data copied to the buffer so it's + // best to return 0 so the caller can't make any + // incorrect assumptions about the state of the buffer. + if firstErr.err != nil && firstErr.err != io.EOF { + read = 0 + } + f.offset += uint64(read) + return read, firstErr.err +} + +// WriteTo writes the file to w. The return value is the number of bytes +// written. Any error encountered during the write is also returned. 
+func (f *File) WriteTo(w io.Writer) (int64, error) { + fi, err := f.Stat() + if err != nil { + return 0, err + } + inFlight := 0 + desiredInFlight := 1 + offset := f.offset + writeOffset := offset + fileSize := uint64(fi.Size()) + // see comment on same line in Read() above + ch := make(chan result, maxConcurrentRequests) + type inflightRead struct { + b []byte + offset uint64 + } + reqs := map[uint32]inflightRead{} + pendingWrites := map[uint64][]byte{} + type offsetErr struct { + offset uint64 + err error + } + var firstErr offsetErr + + sendReq := func(b []byte, offset uint64) { + reqID := f.c.nextID() + f.c.dispatchRequest(ch, sshFxpReadPacket{ + ID: reqID, + Handle: f.handle, + Offset: offset, + Len: uint32(len(b)), + }) + inFlight++ + reqs[reqID] = inflightRead{b: b, offset: offset} + } + + var copied int64 + for firstErr.err == nil || inFlight > 0 { + if firstErr.err == nil { + for inFlight+len(pendingWrites) < desiredInFlight { + b := make([]byte, f.c.maxPacket) + sendReq(b, offset) + offset += uint64(f.c.maxPacket) + if offset > fileSize { + desiredInFlight = 1 + } + } + } + + if inFlight == 0 { + if firstErr.err == nil && len(pendingWrites) > 0 { + return copied, InternalInconsistency + } + break + } + res := <-ch + inFlight-- + if res.err != nil { + firstErr = offsetErr{offset: 0, err: res.err} + continue + } + reqID, data := unmarshalUint32(res.data) + req, ok := reqs[reqID] + if !ok { + firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)} + continue + } + delete(reqs, reqID) + switch res.typ { + case ssh_FXP_STATUS: + if firstErr.err == nil || req.offset < firstErr.offset { + firstErr = offsetErr{offset: req.offset, err: normaliseError(unmarshalStatus(reqID, res.data))} + } + case ssh_FXP_DATA: + l, data := unmarshalUint32(data) + if req.offset == writeOffset { + nbytes, err := w.Write(data) + copied += int64(nbytes) + if err != nil { + // We will never receive another DATA with offset==writeOffset, so + // the loop will drain inFlight and then exit. + firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: err} + break + } + if nbytes < int(l) { + firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: io.ErrShortWrite} + break + } + switch { + case offset > fileSize: + desiredInFlight = 1 + case desiredInFlight < maxConcurrentRequests: + desiredInFlight++ + } + writeOffset += uint64(nbytes) + for { + pendingData, ok := pendingWrites[writeOffset] + if !ok { + break + } + // Give go a chance to free the memory. + delete(pendingWrites, writeOffset) + nbytes, err := w.Write(pendingData) + // Do not move writeOffset on error so subsequent iterations won't trigger + // any writes. + if err != nil { + firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: err} + break + } + if nbytes < len(pendingData) { + firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: io.ErrShortWrite} + break + } + writeOffset += uint64(nbytes) + } + } else { + // Don't write the data yet because + // this response came in out of order + // and we need to wait for responses + // for earlier segments of the file. + pendingWrites[req.offset] = data + } + default: + firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)} + } + } + if firstErr.err != io.EOF { + return copied, firstErr.err + } + return copied, nil +} + +// Stat returns the FileInfo structure describing file. If there is an +// error. 
+func (f *File) Stat() (os.FileInfo, error) { + fs, err := f.c.fstat(f.handle) + if err != nil { + return nil, err + } + return fileInfoFromStat(fs, path.Base(f.path)), nil +} + +// Write writes len(b) bytes to the File. It returns the number of bytes +// written and an error, if any. Write returns a non-nil error when n != +// len(b). +func (f *File) Write(b []byte) (int, error) { + // Split the write into multiple maxPacket sized concurrent writes + // bounded by maxConcurrentRequests. This allows writes with a suitably + // large buffer to transfer data at a much faster rate due to + // overlapping round trip times. + inFlight := 0 + desiredInFlight := 1 + offset := f.offset + // see comment on same line in Read() above + ch := make(chan result, maxConcurrentRequests) + var firstErr error + written := len(b) + for len(b) > 0 || inFlight > 0 { + for inFlight < desiredInFlight && len(b) > 0 && firstErr == nil { + l := min(len(b), f.c.maxPacket) + rb := b[:l] + f.c.dispatchRequest(ch, sshFxpWritePacket{ + ID: f.c.nextID(), + Handle: f.handle, + Offset: offset, + Length: uint32(len(rb)), + Data: rb, + }) + inFlight++ + offset += uint64(l) + b = b[l:] + } + + if inFlight == 0 { + break + } + res := <-ch + inFlight-- + if res.err != nil { + firstErr = res.err + continue + } + switch res.typ { + case ssh_FXP_STATUS: + id, _ := unmarshalUint32(res.data) + err := normaliseError(unmarshalStatus(id, res.data)) + if err != nil && firstErr == nil { + firstErr = err + break + } + if desiredInFlight < maxConcurrentRequests { + desiredInFlight++ + } + default: + firstErr = unimplementedPacketErr(res.typ) + } + } + // If error is non-nil, then there may be gaps in the data written to + // the file so it's best to return 0 so the caller can't make any + // incorrect assumptions about the state of the file. + if firstErr != nil { + written = 0 + } + f.offset += uint64(written) + return written, firstErr +} + +// ReadFrom reads data from r until EOF and writes it to the file. The return +// value is the number of bytes read. Any error except io.EOF encountered +// during the read is also returned. +func (f *File) ReadFrom(r io.Reader) (int64, error) { + inFlight := 0 + desiredInFlight := 1 + offset := f.offset + // see comment on same line in Read() above + ch := make(chan result, maxConcurrentRequests) + var firstErr error + read := int64(0) + b := make([]byte, f.c.maxPacket) + for inFlight > 0 || firstErr == nil { + for inFlight < desiredInFlight && firstErr == nil { + n, err := r.Read(b) + if err != nil { + firstErr = err + } + f.c.dispatchRequest(ch, sshFxpWritePacket{ + ID: f.c.nextID(), + Handle: f.handle, + Offset: offset, + Length: uint32(n), + Data: b[:n], + }) + inFlight++ + offset += uint64(n) + read += int64(n) + } + + if inFlight == 0 { + break + } + res := <-ch + inFlight-- + if res.err != nil { + firstErr = res.err + continue + } + switch res.typ { + case ssh_FXP_STATUS: + id, _ := unmarshalUint32(res.data) + err := normaliseError(unmarshalStatus(id, res.data)) + if err != nil && firstErr == nil { + firstErr = err + break + } + if desiredInFlight < maxConcurrentRequests { + desiredInFlight++ + } + default: + firstErr = unimplementedPacketErr(res.typ) + } + } + if firstErr == io.EOF { + firstErr = nil + } + // If error is non-nil, then there may be gaps in the data written to + // the file so it's best to return 0 so the caller can't make any + // incorrect assumptions about the state of the file. 
+ if firstErr != nil { + read = 0 + } + f.offset += uint64(read) + return read, firstErr +} + +// Seek implements io.Seeker by setting the client offset for the next Read or +// Write. It returns the next offset read. Seeking before or after the end of +// the file is undefined. Seeking relative to the end calls Stat. +func (f *File) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + f.offset = uint64(offset) + case io.SeekCurrent: + f.offset = uint64(int64(f.offset) + offset) + case io.SeekEnd: + fi, err := f.Stat() + if err != nil { + return int64(f.offset), err + } + f.offset = uint64(fi.Size() + offset) + default: + return int64(f.offset), unimplementedSeekWhence(whence) + } + return int64(f.offset), nil +} + +// Chown changes the uid/gid of the current file. +func (f *File) Chown(uid, gid int) error { + return f.c.Chown(f.path, uid, gid) +} + +// Chmod changes the permissions of the current file. +func (f *File) Chmod(mode os.FileMode) error { + return f.c.Chmod(f.path, mode) +} + +// Truncate sets the size of the current file. Although it may be safely assumed +// that if the size is less than its current size it will be truncated to fit, +// the SFTP protocol does not specify what behavior the server should do when setting +// size greater than the current size. +func (f *File) Truncate(size int64) error { + return f.c.Truncate(f.path, size) +} + +func min(a, b int) int { + if a > b { + return b + } + return a +} + +// normaliseError normalises an error into a more standard form that can be +// checked against stdlib errors like io.EOF or os.ErrNotExist. +func normaliseError(err error) error { + switch err := err.(type) { + case *StatusError: + switch err.Code { + case ssh_FX_EOF: + return io.EOF + case ssh_FX_NO_SUCH_FILE: + return os.ErrNotExist + case ssh_FX_OK: + return nil + default: + return err + } + default: + return err + } +} + +func unmarshalStatus(id uint32, data []byte) error { + sid, data := unmarshalUint32(data) + if sid != id { + return &unexpectedIDErr{id, sid} + } + code, data := unmarshalUint32(data) + msg, data, _ := unmarshalStringSafe(data) + lang, _, _ := unmarshalStringSafe(data) + return &StatusError{ + Code: code, + msg: msg, + lang: lang, + } +} + +func marshalStatus(b []byte, err StatusError) []byte { + b = marshalUint32(b, err.Code) + b = marshalString(b, err.msg) + b = marshalString(b, err.lang) + return b +} + +// flags converts the flags passed to OpenFile into ssh flags. +// Unsupported flags are ignored. +func flags(f int) uint32 { + var out uint32 + switch f & os.O_WRONLY { + case os.O_WRONLY: + out |= ssh_FXF_WRITE + case os.O_RDONLY: + out |= ssh_FXF_READ + } + if f&os.O_RDWR == os.O_RDWR { + out |= ssh_FXF_READ | ssh_FXF_WRITE + } + if f&os.O_APPEND == os.O_APPEND { + out |= ssh_FXF_APPEND + } + if f&os.O_CREATE == os.O_CREATE { + out |= ssh_FXF_CREAT + } + if f&os.O_TRUNC == os.O_TRUNC { + out |= ssh_FXF_TRUNC + } + if f&os.O_EXCL == os.O_EXCL { + out |= ssh_FXF_EXCL + } + return out +} diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go new file mode 100644 index 00000000..f799715e --- /dev/null +++ b/vendor/github.com/pkg/sftp/conn.go @@ -0,0 +1,133 @@ +package sftp + +import ( + "encoding" + "io" + "sync" + + "github.com/pkg/errors" +) + +// conn implements a bidirectional channel on which client and server +// connections are multiplexed. 
+type conn struct { + io.Reader + io.WriteCloser + sync.Mutex // used to serialise writes to sendPacket + // sendPacketTest is needed to replicate packet issues in testing + sendPacketTest func(w io.Writer, m encoding.BinaryMarshaler) error +} + +func (c *conn) recvPacket() (uint8, []byte, error) { + return recvPacket(c) +} + +func (c *conn) sendPacket(m encoding.BinaryMarshaler) error { + c.Lock() + defer c.Unlock() + if c.sendPacketTest != nil { + return c.sendPacketTest(c, m) + } + return sendPacket(c, m) +} + +type clientConn struct { + conn + wg sync.WaitGroup + sync.Mutex // protects inflight + inflight map[uint32]chan<- result // outstanding requests +} + +// Close closes the SFTP session. +func (c *clientConn) Close() error { + defer c.wg.Wait() + return c.conn.Close() +} + +func (c *clientConn) loop() { + defer c.wg.Done() + err := c.recv() + if err != nil { + c.broadcastErr(err) + } +} + +// recv continuously reads from the server and forwards responses to the +// appropriate channel. +func (c *clientConn) recv() error { + defer func() { + c.conn.Lock() + c.conn.Close() + c.conn.Unlock() + }() + for { + typ, data, err := c.recvPacket() + if err != nil { + return err + } + sid, _ := unmarshalUint32(data) + c.Lock() + ch, ok := c.inflight[sid] + delete(c.inflight, sid) + c.Unlock() + if !ok { + // This is an unexpected occurrence. Send the error + // back to all listeners so that they terminate + // gracefully. + return errors.Errorf("sid: %v not fond", sid) + } + ch <- result{typ: typ, data: data} + } +} + +// result captures the result of receiving the a packet from the server +type result struct { + typ byte + data []byte + err error +} + +type idmarshaler interface { + id() uint32 + encoding.BinaryMarshaler +} + +func (c *clientConn) sendPacket(p idmarshaler) (byte, []byte, error) { + ch := make(chan result, 2) + c.dispatchRequest(ch, p) + s := <-ch + return s.typ, s.data, s.err +} + +func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) { + c.Lock() + c.inflight[p.id()] = ch + c.Unlock() + if err := c.conn.sendPacket(p); err != nil { + c.Lock() + delete(c.inflight, p.id()) + c.Unlock() + ch <- result{err: err} + } +} + +// broadcastErr sends an error to all goroutines waiting for a response. +func (c *clientConn) broadcastErr(err error) { + c.Lock() + listeners := make([]chan<- result, 0, len(c.inflight)) + for _, ch := range c.inflight { + listeners = append(listeners, ch) + } + c.Unlock() + for _, ch := range listeners { + ch <- result{err: err} + } +} + +type serverConn struct { + conn +} + +func (s *serverConn) sendError(p ider, err error) error { + return s.sendPacket(statusFromError(p, err)) +} diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go new file mode 100644 index 00000000..3e264abe --- /dev/null +++ b/vendor/github.com/pkg/sftp/debug.go @@ -0,0 +1,9 @@ +// +build debug + +package sftp + +import "log" + +func debug(fmt string, args ...interface{}) { + log.Printf(fmt, args...) +} diff --git a/vendor/github.com/pkg/sftp/examples/buffered-read-benchmark/main.go b/vendor/github.com/pkg/sftp/examples/buffered-read-benchmark/main.go new file mode 100644 index 00000000..36ac6d72 --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/buffered-read-benchmark/main.go @@ -0,0 +1,78 @@ +// buffered-read-benchmark benchmarks the peformance of reading +// from /dev/zero on the server to a []byte on the client via io.Copy. 
+package main + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "time" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + + "github.com/pkg/sftp" +) + +var ( + USER = flag.String("user", os.Getenv("USER"), "ssh username") + HOST = flag.String("host", "localhost", "ssh server hostname") + PORT = flag.Int("port", 22, "ssh server port") + PASS = flag.String("pass", os.Getenv("SOCKSIE_SSH_PASSWORD"), "ssh password") + SIZE = flag.Int("s", 1<<15, "set max packet size") +) + +func init() { + flag.Parse() +} + +func main() { + var auths []ssh.AuthMethod + if aconn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil { + auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers)) + + } + if *PASS != "" { + auths = append(auths, ssh.Password(*PASS)) + } + + config := ssh.ClientConfig{ + User: *USER, + Auth: auths, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + addr := fmt.Sprintf("%s:%d", *HOST, *PORT) + conn, err := ssh.Dial("tcp", addr, &config) + if err != nil { + log.Fatalf("unable to connect to [%s]: %v", addr, err) + } + defer conn.Close() + + c, err := sftp.NewClient(conn, sftp.MaxPacket(*SIZE)) + if err != nil { + log.Fatalf("unable to start sftp subsytem: %v", err) + } + defer c.Close() + + r, err := c.Open("/dev/zero") + if err != nil { + log.Fatal(err) + } + defer r.Close() + + const size = 1e9 + + log.Printf("reading %v bytes", size) + t1 := time.Now() + n, err := io.ReadFull(r, make([]byte, size)) + if err != nil { + log.Fatal(err) + } + if n != size { + log.Fatalf("copy: expected %v bytes, got %d", size, n) + } + log.Printf("read %v bytes in %s", size, time.Since(t1)) +} diff --git a/vendor/github.com/pkg/sftp/examples/buffered-write-benchmark/main.go b/vendor/github.com/pkg/sftp/examples/buffered-write-benchmark/main.go new file mode 100644 index 00000000..d1babedb --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/buffered-write-benchmark/main.go @@ -0,0 +1,84 @@ +// buffered-write-benchmark benchmarks the peformance of writing +// a single large []byte on the client to /dev/null on the server via io.Copy. 
+package main + +import ( + "flag" + "fmt" + "log" + "net" + "os" + "syscall" + "time" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + + "github.com/pkg/sftp" +) + +var ( + USER = flag.String("user", os.Getenv("USER"), "ssh username") + HOST = flag.String("host", "localhost", "ssh server hostname") + PORT = flag.Int("port", 22, "ssh server port") + PASS = flag.String("pass", os.Getenv("SOCKSIE_SSH_PASSWORD"), "ssh password") + SIZE = flag.Int("s", 1<<15, "set max packet size") +) + +func init() { + flag.Parse() +} + +func main() { + var auths []ssh.AuthMethod + if aconn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil { + auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers)) + + } + if *PASS != "" { + auths = append(auths, ssh.Password(*PASS)) + } + + config := ssh.ClientConfig{ + User: *USER, + Auth: auths, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + addr := fmt.Sprintf("%s:%d", *HOST, *PORT) + conn, err := ssh.Dial("tcp", addr, &config) + if err != nil { + log.Fatalf("unable to connect to [%s]: %v", addr, err) + } + defer conn.Close() + + c, err := sftp.NewClient(conn, sftp.MaxPacket(*SIZE)) + if err != nil { + log.Fatalf("unable to start sftp subsytem: %v", err) + } + defer c.Close() + + w, err := c.OpenFile("/dev/null", syscall.O_WRONLY) + if err != nil { + log.Fatal(err) + } + defer w.Close() + + f, err := os.Open("/dev/zero") + if err != nil { + log.Fatal(err) + } + defer f.Close() + + const size = 1e9 + + log.Printf("writing %v bytes", size) + t1 := time.Now() + n, err := w.Write(make([]byte, size)) + if err != nil { + log.Fatal(err) + } + if n != size { + log.Fatalf("copy: expected %v bytes, got %d", size, n) + } + log.Printf("wrote %v bytes in %s", size, time.Since(t1)) +} diff --git a/vendor/github.com/pkg/sftp/examples/request-server/main.go b/vendor/github.com/pkg/sftp/examples/request-server/main.go new file mode 100644 index 00000000..fd21b43e --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/request-server/main.go @@ -0,0 +1,131 @@ +// An example SFTP server implementation using the golang SSH package. +// Serves the whole filesystem visible to the user, and has a hard-coded username and password, +// so not for real use! +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + + "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" +) + +// Based on example server code from golang.org/x/crypto/ssh and server_standalone +func main() { + + var ( + readOnly bool + debugStderr bool + ) + + flag.BoolVar(&readOnly, "R", false, "read-only server") + flag.BoolVar(&debugStderr, "e", false, "debug to stderr") + flag.Parse() + + debugStream := ioutil.Discard + if debugStderr { + debugStream = os.Stderr + } + + // An SSH server is represented by a ServerConfig, which holds + // certificate details and handles authentication of ServerConns. + config := &ssh.ServerConfig{ + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. 
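+ // Illustrative note (not part of the upstream example): a constant-time
+ // check would use crypto/subtle, e.g.
+ //   if subtle.ConstantTimeCompare(pass, []byte("tiger")) == 1 { ... }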
+ fmt.Fprintf(debugStream, "Login: %s\n", c.User()) + if c.User() == "testuser" && string(pass) == "tiger" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + } + + privateBytes, err := ioutil.ReadFile("id_rsa") + if err != nil { + log.Fatal("Failed to load private key", err) + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + log.Fatal("Failed to parse private key", err) + } + + config.AddHostKey(private) + + // Once a ServerConfig has been configured, connections can be + // accepted. + listener, err := net.Listen("tcp", "0.0.0.0:2022") + if err != nil { + log.Fatal("failed to listen for connection", err) + } + fmt.Printf("Listening on %v\n", listener.Addr()) + + nConn, err := listener.Accept() + if err != nil { + log.Fatal("failed to accept incoming connection", err) + } + + // Before use, a handshake must be performed on the incoming net.Conn. + sconn, chans, reqs, err := ssh.NewServerConn(nConn, config) + if err != nil { + log.Fatal("failed to handshake", err) + } + log.Println("login detected:", sconn.User()) + fmt.Fprintf(debugStream, "SSH server established\n") + + // The incoming Request channel must be serviced. + go ssh.DiscardRequests(reqs) + + // Service the incoming Channel channel. + for newChannel := range chans { + // Channels have a type, depending on the application level + // protocol intended. In the case of an SFTP session, this is "subsystem" + // with a payload string of "sftp" + fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType()) + if newChannel.ChannelType() != "session" { + newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") + fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType()) + continue + } + channel, requests, err := newChannel.Accept() + if err != nil { + log.Fatal("could not accept channel.", err) + } + fmt.Fprintf(debugStream, "Channel accepted\n") + + // Sessions have out-of-band requests such as "shell", + // "pty-req" and "env". Here we handle only the + // "subsystem" request. + go func(in <-chan *ssh.Request) { + for req := range in { + fmt.Fprintf(debugStream, "Request: %v\n", req.Type) + ok := false + switch req.Type { + case "subsystem": + fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:]) + if string(req.Payload[4:]) == "sftp" { + ok = true + } + } + fmt.Fprintf(debugStream, " - accepted: %v\n", ok) + req.Reply(ok, nil) + } + }(requests) + + root := sftp.InMemHandler() + server := sftp.NewRequestServer(channel, root) + if err := server.Serve(); err == io.EOF { + server.Close() + log.Print("sftp client exited session.") + } else if err != nil { + log.Fatal("sftp server completed with error:", err) + } + } +} diff --git a/vendor/github.com/pkg/sftp/examples/sftp-server/main.go b/vendor/github.com/pkg/sftp/examples/sftp-server/main.go new file mode 100644 index 00000000..48e0e868 --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/sftp-server/main.go @@ -0,0 +1,147 @@ +// An example SFTP server implementation using the golang SSH package. +// Serves the whole filesystem visible to the user, and has a hard-coded username and password, +// so not for real use! 
+package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + + "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" +) + +// Based on example server code from golang.org/x/crypto/ssh and server_standalone +func main() { + + var ( + readOnly bool + debugStderr bool + ) + + flag.BoolVar(&readOnly, "R", false, "read-only server") + flag.BoolVar(&debugStderr, "e", false, "debug to stderr") + flag.Parse() + + debugStream := ioutil.Discard + if debugStderr { + debugStream = os.Stderr + } + + // An SSH server is represented by a ServerConfig, which holds + // certificate details and handles authentication of ServerConns. + config := &ssh.ServerConfig{ + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. + fmt.Fprintf(debugStream, "Login: %s\n", c.User()) + if c.User() == "testuser" && string(pass) == "tiger" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + } + + privateBytes, err := ioutil.ReadFile("id_rsa") + if err != nil { + log.Fatal("Failed to load private key", err) + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + log.Fatal("Failed to parse private key", err) + } + + config.AddHostKey(private) + + // Once a ServerConfig has been configured, connections can be + // accepted. + listener, err := net.Listen("tcp", "0.0.0.0:2022") + if err != nil { + log.Fatal("failed to listen for connection", err) + } + fmt.Printf("Listening on %v\n", listener.Addr()) + + nConn, err := listener.Accept() + if err != nil { + log.Fatal("failed to accept incoming connection", err) + } + + // Before use, a handshake must be performed on the incoming + // net.Conn. + _, chans, reqs, err := ssh.NewServerConn(nConn, config) + if err != nil { + log.Fatal("failed to handshake", err) + } + fmt.Fprintf(debugStream, "SSH server established\n") + + // The incoming Request channel must be serviced. + go ssh.DiscardRequests(reqs) + + // Service the incoming Channel channel. + for newChannel := range chans { + // Channels have a type, depending on the application level + // protocol intended. In the case of an SFTP session, this is "subsystem" + // with a payload string of "sftp" + fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType()) + if newChannel.ChannelType() != "session" { + newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") + fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType()) + continue + } + channel, requests, err := newChannel.Accept() + if err != nil { + log.Fatal("could not accept channel.", err) + } + fmt.Fprintf(debugStream, "Channel accepted\n") + + // Sessions have out-of-band requests such as "shell", + // "pty-req" and "env". Here we handle only the + // "subsystem" request. 
+ go func(in <-chan *ssh.Request) { + for req := range in { + fmt.Fprintf(debugStream, "Request: %v\n", req.Type) + ok := false + switch req.Type { + case "subsystem": + fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:]) + if string(req.Payload[4:]) == "sftp" { + ok = true + } + } + fmt.Fprintf(debugStream, " - accepted: %v\n", ok) + req.Reply(ok, nil) + } + }(requests) + + serverOptions := []sftp.ServerOption{ + sftp.WithDebug(debugStream), + } + + if readOnly { + serverOptions = append(serverOptions, sftp.ReadOnly()) + fmt.Fprintf(debugStream, "Read-only server\n") + } else { + fmt.Fprintf(debugStream, "Read write server\n") + } + + server, err := sftp.NewServer( + channel, + serverOptions..., + ) + if err != nil { + log.Fatal(err) + } + if err := server.Serve(); err == io.EOF { + server.Close() + log.Print("sftp client exited session.") + } else if err != nil { + log.Fatal("sftp server completed with error:", err) + } + } +} diff --git a/vendor/github.com/pkg/sftp/examples/streaming-read-benchmark/main.go b/vendor/github.com/pkg/sftp/examples/streaming-read-benchmark/main.go new file mode 100644 index 00000000..87afc5a3 --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/streaming-read-benchmark/main.go @@ -0,0 +1,85 @@ +// streaming-read-benchmark benchmarks the peformance of reading +// from /dev/zero on the server to /dev/null on the client via io.Copy. +package main + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "syscall" + "time" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + + "github.com/pkg/sftp" +) + +var ( + USER = flag.String("user", os.Getenv("USER"), "ssh username") + HOST = flag.String("host", "localhost", "ssh server hostname") + PORT = flag.Int("port", 22, "ssh server port") + PASS = flag.String("pass", os.Getenv("SOCKSIE_SSH_PASSWORD"), "ssh password") + SIZE = flag.Int("s", 1<<15, "set max packet size") +) + +func init() { + flag.Parse() +} + +func main() { + var auths []ssh.AuthMethod + if aconn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil { + auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers)) + + } + if *PASS != "" { + auths = append(auths, ssh.Password(*PASS)) + } + + config := ssh.ClientConfig{ + User: *USER, + Auth: auths, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + addr := fmt.Sprintf("%s:%d", *HOST, *PORT) + conn, err := ssh.Dial("tcp", addr, &config) + if err != nil { + log.Fatalf("unable to connect to [%s]: %v", addr, err) + } + defer conn.Close() + + c, err := sftp.NewClient(conn, sftp.MaxPacket(*SIZE)) + if err != nil { + log.Fatalf("unable to start sftp subsytem: %v", err) + } + defer c.Close() + + r, err := c.Open("/dev/zero") + if err != nil { + log.Fatal(err) + } + defer r.Close() + + w, err := os.OpenFile("/dev/null", syscall.O_WRONLY, 0600) + if err != nil { + log.Fatal(err) + } + defer w.Close() + + const size int64 = 1e9 + + log.Printf("reading %v bytes", size) + t1 := time.Now() + n, err := io.Copy(w, io.LimitReader(r, size)) + if err != nil { + log.Fatal(err) + } + if n != size { + log.Fatalf("copy: expected %v bytes, got %d", size, n) + } + log.Printf("read %v bytes in %s", size, time.Since(t1)) +} diff --git a/vendor/github.com/pkg/sftp/examples/streaming-write-benchmark/main.go b/vendor/github.com/pkg/sftp/examples/streaming-write-benchmark/main.go new file mode 100644 index 00000000..8f432d39 --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/streaming-write-benchmark/main.go @@ -0,0 +1,85 @@ +// streaming-write-benchmark benchmarks the 
peformance of writing +// from /dev/zero on the client to /dev/null on the server via io.Copy. +package main + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "syscall" + "time" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + + "github.com/pkg/sftp" +) + +var ( + USER = flag.String("user", os.Getenv("USER"), "ssh username") + HOST = flag.String("host", "localhost", "ssh server hostname") + PORT = flag.Int("port", 22, "ssh server port") + PASS = flag.String("pass", os.Getenv("SOCKSIE_SSH_PASSWORD"), "ssh password") + SIZE = flag.Int("s", 1<<15, "set max packet size") +) + +func init() { + flag.Parse() +} + +func main() { + var auths []ssh.AuthMethod + if aconn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil { + auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers)) + + } + if *PASS != "" { + auths = append(auths, ssh.Password(*PASS)) + } + + config := ssh.ClientConfig{ + User: *USER, + Auth: auths, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + addr := fmt.Sprintf("%s:%d", *HOST, *PORT) + conn, err := ssh.Dial("tcp", addr, &config) + if err != nil { + log.Fatalf("unable to connect to [%s]: %v", addr, err) + } + defer conn.Close() + + c, err := sftp.NewClient(conn, sftp.MaxPacket(*SIZE)) + if err != nil { + log.Fatalf("unable to start sftp subsytem: %v", err) + } + defer c.Close() + + w, err := c.OpenFile("/dev/null", syscall.O_WRONLY) + if err != nil { + log.Fatal(err) + } + defer w.Close() + + f, err := os.Open("/dev/zero") + if err != nil { + log.Fatal(err) + } + defer f.Close() + + const size int64 = 1e9 + + log.Printf("writing %v bytes", size) + t1 := time.Now() + n, err := io.Copy(w, io.LimitReader(f, size)) + if err != nil { + log.Fatal(err) + } + if n != size { + log.Fatalf("copy: expected %v bytes, got %d", size, n) + } + log.Printf("wrote %v bytes in %s", size, time.Since(t1)) +} diff --git a/vendor/github.com/pkg/sftp/match.go b/vendor/github.com/pkg/sftp/match.go new file mode 100644 index 00000000..e2f2ba40 --- /dev/null +++ b/vendor/github.com/pkg/sftp/match.go @@ -0,0 +1,295 @@ +package sftp + +import ( + "path" + "strings" + "unicode/utf8" +) + +// ErrBadPattern indicates a globbing pattern was malformed. +var ErrBadPattern = path.ErrBadPattern + +// Unix separator +const separator = "/" + +// Match reports whether name matches the shell file name pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-Separator characters +// '?' matches any single non-Separator character +// '[' [ '^' ] { character-range } ']' +// character class (must be non-empty) +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// +func Match(pattern, name string) (matched bool, err error) { + return path.Match(pattern, name) +} + +// detect if byte(char) is path separator +func isPathSeparator(c byte) bool { + return string(c) == "/" +} + +// scanChunk gets the next segment of pattern, which is a non-star string +// possibly preceded by a star. 
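Match above is a thin wrapper over path.Match, so the pattern syntax documented for it behaves exactly like the standard library's slash-separated globbing. A minimal standalone sketch of that syntax (illustration only, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/pkg/sftp"
)

func main() {
	// "*" matches within a single path element, "?" matches one non-separator
	// rune, and "[a-c]" is a character class, as documented for Match above.
	ok, err := sftp.Match("/var/*/[a-c]??.log", "/var/tmp/app.log")
	fmt.Println(ok, err) // true <nil>
}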
+func scanChunk(pattern string) (star bool, chunk, rest string) { + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + star = true + } + inrange := false + var i int +Scan: + for i = 0; i < len(pattern); i++ { + switch pattern[i] { + case '\\': + + // error check handled in matchChunk: bad pattern. + if i+1 < len(pattern) { + i++ + } + case '[': + inrange = true + case ']': + inrange = false + case '*': + if !inrange { + break Scan + } + } + } + return star, pattern[0:i], pattern[i:] +} + +// matchChunk checks whether chunk matches the beginning of s. +// If so, it returns the remainder of s (after the match). +// Chunk is all single-character operators: literals, char classes, and ?. +func matchChunk(chunk, s string) (rest string, ok bool, err error) { + for len(chunk) > 0 { + if len(s) == 0 { + return + } + switch chunk[0] { + case '[': + // character class + r, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + // We can't end right after '[', we're expecting at least + // a closing bracket and possibly a caret. + if len(chunk) == 0 { + err = ErrBadPattern + return + } + // possibly negated + negated := chunk[0] == '^' + if negated { + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match == negated { + return + } + + case '?': + if isPathSeparator(s[0]) { + return + } + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + + case '\\': + chunk = chunk[1:] + if len(chunk) == 0 { + err = ErrBadPattern + return + } + fallthrough + + default: + if chunk[0] != s[0] { + return + } + s = s[1:] + chunk = chunk[1:] + } + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. +func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = ErrBadPattern + return + } + if chunk[0] == '\\' { + chunk = chunk[1:] + if len(chunk) == 0 { + err = ErrBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = ErrBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = ErrBadPattern + } + return +} + +// Split splits path immediately following the final Separator, +// separating it into a directory and file name component. +// If there is no Separator in path, Split returns an empty dir +// and file set to path. +// The returned values have the property that path = dir+file. +func Split(path string) (dir, file string) { + i := len(path) - 1 + for i >= 0 && !isPathSeparator(path[i]) { + i-- + } + return path[:i+1], path[i+1:] +} + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. 
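Since Glob shares Match's pattern syntax and only ever fails with ErrBadPattern, a typical call site is short. A hedged usage sketch, assuming c is an already-connected *sftp.Client obtained as in the example programs earlier in this patch (listLogs is a hypothetical helper):

package example

import (
	"fmt"
	"log"

	"github.com/pkg/sftp"
)

// listLogs prints every remote file matching the pattern.
func listLogs(c *sftp.Client) {
	matches, err := c.Glob("/var/log/*.log") // same pattern syntax as Match
	if err != nil {
		log.Fatal(err) // only ErrBadPattern is possible here
	}
	for _, name := range matches {
		fmt.Println(name)
	}
}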
+func (c *Client) Glob(pattern string) (matches []string, err error) { + if !hasMeta(pattern) { + file, err := c.Lstat(pattern) + if err != nil { + return nil, nil + } + dir, _ := Split(pattern) + dir = cleanGlobPath(dir) + return []string{Join(dir, file.Name())}, nil + } + + dir, file := Split(pattern) + dir = cleanGlobPath(dir) + + if !hasMeta(dir) { + return c.glob(dir, file, nil) + } + + // Prevent infinite recursion. See issue 15879. + if dir == pattern { + return nil, ErrBadPattern + } + + var m []string + m, err = c.Glob(dir) + if err != nil { + return + } + for _, d := range m { + matches, err = c.glob(d, file, matches) + if err != nil { + return + } + } + return +} + +// cleanGlobPath prepares path for glob matching. +func cleanGlobPath(path string) string { + switch path { + case "": + return "." + case string(separator): + // do nothing to the path + return path + default: + return path[0 : len(path)-1] // chop off trailing separator + } +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func (c *Client) glob(dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := c.Stat(dir) + if err != nil { + return + } + if !fi.IsDir() { + return + } + names, err := c.ReadDir(dir) + if err != nil { + return + } + //sort.Strings(names) + + for _, n := range names { + matched, err := Match(pattern, n.Name()) + if err != nil { + return m, err + } + if matched { + m = append(m, Join(dir, n.Name())) + } + } + return +} + +// Join joins any number of path elements into a single path, adding +// a Separator if necessary. +// all empty strings are ignored. +func Join(elem ...string) string { + return path.Join(elem...) +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by Match. +func hasMeta(path string) bool { + // TODO(niemeyer): Should other magic characters be added here? + return strings.ContainsAny(path, "*?[") +} diff --git a/vendor/github.com/pkg/sftp/packet-manager.go b/vendor/github.com/pkg/sftp/packet-manager.go new file mode 100644 index 00000000..bf822e67 --- /dev/null +++ b/vendor/github.com/pkg/sftp/packet-manager.go @@ -0,0 +1,173 @@ +package sftp + +import ( + "encoding" + "sort" + "sync" +) + +// The goal of the packetManager is to keep the outgoing packets in the same +// order as the incoming. This is due to some sftp clients requiring this +// behavior (eg. winscp). 
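The ordering requirement can be pictured as a head-of-line queue: a response may only be sent once the response for the oldest outstanding request id is ready. A toy model of that rule (an editor's illustration, not library code; maybeSendPackets below implements the real version):

package main

import "fmt"

func main() {
	order := []uint32{1, 2, 3}              // request ids in arrival order
	ready := map[uint32]string{2: "resp-2"} // response 2 happened to finish first
	for len(order) > 0 {
		resp, ok := ready[order[0]]
		if !ok {
			break // head-of-line response is not ready, so nothing is sent
		}
		fmt.Println("send", resp)
		order = order[1:]
	}
	// Prints nothing: resp-2 is held back until resp-1 exists.
}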
+ +type packetSender interface { + sendPacket(encoding.BinaryMarshaler) error +} + +type packetManager struct { + requests chan requestPacket + responses chan responsePacket + fini chan struct{} + incoming requestPacketIDs + outgoing responsePackets + sender packetSender // connection object + working *sync.WaitGroup +} + +func newPktMgr(sender packetSender) *packetManager { + s := &packetManager{ + requests: make(chan requestPacket, SftpServerWorkerCount), + responses: make(chan responsePacket, SftpServerWorkerCount), + fini: make(chan struct{}), + incoming: make([]uint32, 0, SftpServerWorkerCount), + outgoing: make([]responsePacket, 0, SftpServerWorkerCount), + sender: sender, + working: &sync.WaitGroup{}, + } + go s.controller() + return s +} + +type responsePackets []responsePacket + +func (r responsePackets) Sort() { + sort.Slice(r, func(i, j int) bool { + return r[i].id() < r[j].id() + }) +} + +type requestPacketIDs []uint32 + +func (r requestPacketIDs) Sort() { + sort.Slice(r, func(i, j int) bool { + return r[i] < r[j] + }) +} + +// register incoming packets to be handled +// send id of 0 for packets without id +func (s *packetManager) incomingPacket(pkt requestPacket) { + s.working.Add(1) + s.requests <- pkt // buffer == SftpServerWorkerCount +} + +// register outgoing packets as being ready +func (s *packetManager) readyPacket(pkt responsePacket) { + s.responses <- pkt + s.working.Done() +} + +// shut down packetManager controller +func (s *packetManager) close() { + // pause until current packets are processed + s.working.Wait() + close(s.fini) +} + +// Passed a worker function, returns a channel for incoming packets. +// The goal is to process packets in the order they are received as is +// requires by section 7 of the RFC, while maximizing throughput of file +// transfers. +func (s *packetManager) workerChan(runWorker func(requestChan)) requestChan { + + rwChan := make(chan requestPacket, SftpServerWorkerCount) + for i := 0; i < SftpServerWorkerCount; i++ { + runWorker(rwChan) + } + + cmdChan := make(chan requestPacket) + runWorker(cmdChan) + + pktChan := make(chan requestPacket, SftpServerWorkerCount) + go func() { + // start with cmdChan + curChan := cmdChan + for pkt := range pktChan { + // on file open packet, switch to rwChan + switch pkt.(type) { + case *sshFxpOpenPacket: + curChan = rwChan + // on file close packet, switch back to cmdChan + // after waiting for any reads/writes to finish + case *sshFxpClosePacket: + // wait for rwChan to finish + s.working.Wait() + // stop using rwChan + curChan = cmdChan + } + s.incomingPacket(pkt) + curChan <- pkt + } + close(rwChan) + close(cmdChan) + s.close() + }() + + return pktChan +} + +// process packets +func (s *packetManager) controller() { + for { + select { + case pkt := <-s.requests: + debug("incoming id: %v", pkt.id()) + s.incoming = append(s.incoming, pkt.id()) + if len(s.incoming) > 1 { + s.incoming.Sort() + } + case pkt := <-s.responses: + debug("outgoing pkt: %v", pkt.id()) + s.outgoing = append(s.outgoing, pkt) + if len(s.outgoing) > 1 { + s.outgoing.Sort() + } + case <-s.fini: + return + } + s.maybeSendPackets() + } +} + +// send as many packets as are ready +func (s *packetManager) maybeSendPackets() { + for { + if len(s.outgoing) == 0 || len(s.incoming) == 0 { + debug("break! 
-- outgoing: %v; incoming: %v", + len(s.outgoing), len(s.incoming)) + break + } + out := s.outgoing[0] + in := s.incoming[0] + // debug("incoming: %v", s.incoming) + // debug("outgoing: %v", outfilter(s.outgoing)) + if in == out.id() { + s.sender.sendPacket(out) + // pop off heads + copy(s.incoming, s.incoming[1:]) // shift left + s.incoming = s.incoming[:len(s.incoming)-1] // remove last + copy(s.outgoing, s.outgoing[1:]) // shift left + s.outgoing = s.outgoing[:len(s.outgoing)-1] // remove last + } else { + break + } + } +} + +//func outfilter(o []responsePacket) []uint32 { +// res := make([]uint32, 0, len(o)) +// for _, v := range o { +// res = append(res, v.id()) +// } +// return res +//} diff --git a/vendor/github.com/pkg/sftp/packet-typing.go b/vendor/github.com/pkg/sftp/packet-typing.go new file mode 100644 index 00000000..9001aea6 --- /dev/null +++ b/vendor/github.com/pkg/sftp/packet-typing.go @@ -0,0 +1,133 @@ +package sftp + +import ( + "encoding" + + "github.com/pkg/errors" +) + +// all incoming packets +type requestPacket interface { + encoding.BinaryUnmarshaler + id() uint32 +} + +type requestChan chan requestPacket + +type responsePacket interface { + encoding.BinaryMarshaler + id() uint32 +} + +// interfaces to group types +type hasPath interface { + requestPacket + getPath() string +} + +type hasHandle interface { + requestPacket + getHandle() string +} + +type notReadOnly interface { + notReadOnly() +} + +//// define types by adding methods +// hasPath +func (p sshFxpLstatPacket) getPath() string { return p.Path } +func (p sshFxpStatPacket) getPath() string { return p.Path } +func (p sshFxpRmdirPacket) getPath() string { return p.Path } +func (p sshFxpReadlinkPacket) getPath() string { return p.Path } +func (p sshFxpRealpathPacket) getPath() string { return p.Path } +func (p sshFxpMkdirPacket) getPath() string { return p.Path } +func (p sshFxpSetstatPacket) getPath() string { return p.Path } +func (p sshFxpStatvfsPacket) getPath() string { return p.Path } +func (p sshFxpRemovePacket) getPath() string { return p.Filename } +func (p sshFxpRenamePacket) getPath() string { return p.Oldpath } +func (p sshFxpSymlinkPacket) getPath() string { return p.Targetpath } +func (p sshFxpOpendirPacket) getPath() string { return p.Path } +func (p sshFxpOpenPacket) getPath() string { return p.Path } + +func (p sshFxpExtendedPacketPosixRename) getPath() string { return p.Oldpath } + +// hasHandle +func (p sshFxpFstatPacket) getHandle() string { return p.Handle } +func (p sshFxpFsetstatPacket) getHandle() string { return p.Handle } +func (p sshFxpReadPacket) getHandle() string { return p.Handle } +func (p sshFxpWritePacket) getHandle() string { return p.Handle } +func (p sshFxpReaddirPacket) getHandle() string { return p.Handle } +func (p sshFxpClosePacket) getHandle() string { return p.Handle } + +// notReadOnly +func (p sshFxpWritePacket) notReadOnly() {} +func (p sshFxpSetstatPacket) notReadOnly() {} +func (p sshFxpFsetstatPacket) notReadOnly() {} +func (p sshFxpRemovePacket) notReadOnly() {} +func (p sshFxpMkdirPacket) notReadOnly() {} +func (p sshFxpRmdirPacket) notReadOnly() {} +func (p sshFxpRenamePacket) notReadOnly() {} +func (p sshFxpSymlinkPacket) notReadOnly() {} +func (p sshFxpExtendedPacketPosixRename) notReadOnly() {} + +// some packets with ID are missing id() +func (p sshFxpDataPacket) id() uint32 { return p.ID } +func (p sshFxpStatusPacket) id() uint32 { return p.ID } +func (p sshFxpStatResponse) id() uint32 { return p.ID } +func (p sshFxpNamePacket) id() uint32 { return 
p.ID } +func (p sshFxpHandlePacket) id() uint32 { return p.ID } +func (p sshFxVersionPacket) id() uint32 { return 0 } + +// take raw incoming packet data and build packet objects +func makePacket(p rxPacket) (requestPacket, error) { + var pkt requestPacket + switch p.pktType { + case ssh_FXP_INIT: + pkt = &sshFxInitPacket{} + case ssh_FXP_LSTAT: + pkt = &sshFxpLstatPacket{} + case ssh_FXP_OPEN: + pkt = &sshFxpOpenPacket{} + case ssh_FXP_CLOSE: + pkt = &sshFxpClosePacket{} + case ssh_FXP_READ: + pkt = &sshFxpReadPacket{} + case ssh_FXP_WRITE: + pkt = &sshFxpWritePacket{} + case ssh_FXP_FSTAT: + pkt = &sshFxpFstatPacket{} + case ssh_FXP_SETSTAT: + pkt = &sshFxpSetstatPacket{} + case ssh_FXP_FSETSTAT: + pkt = &sshFxpFsetstatPacket{} + case ssh_FXP_OPENDIR: + pkt = &sshFxpOpendirPacket{} + case ssh_FXP_READDIR: + pkt = &sshFxpReaddirPacket{} + case ssh_FXP_REMOVE: + pkt = &sshFxpRemovePacket{} + case ssh_FXP_MKDIR: + pkt = &sshFxpMkdirPacket{} + case ssh_FXP_RMDIR: + pkt = &sshFxpRmdirPacket{} + case ssh_FXP_REALPATH: + pkt = &sshFxpRealpathPacket{} + case ssh_FXP_STAT: + pkt = &sshFxpStatPacket{} + case ssh_FXP_RENAME: + pkt = &sshFxpRenamePacket{} + case ssh_FXP_READLINK: + pkt = &sshFxpReadlinkPacket{} + case ssh_FXP_SYMLINK: + pkt = &sshFxpSymlinkPacket{} + case ssh_FXP_EXTENDED: + pkt = &sshFxpExtendedPacket{} + default: + return nil, errors.Errorf("unhandled packet type: %s", p.pktType) + } + if err := pkt.UnmarshalBinary(p.pktBytes); err != nil { + return nil, err + } + return pkt, nil +} diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go new file mode 100644 index 00000000..711ca50c --- /dev/null +++ b/vendor/github.com/pkg/sftp/packet.go @@ -0,0 +1,952 @@ +package sftp + +import ( + "bytes" + "encoding" + "encoding/binary" + "fmt" + "io" + "os" + "reflect" + + "github.com/pkg/errors" +) + +var ( + errShortPacket = errors.New("packet too short") + errUnknownExtendedPacket = errors.New("unknown extended packet") +) + +const ( + debugDumpTxPacket = false + debugDumpRxPacket = false + debugDumpTxPacketBytes = false + debugDumpRxPacketBytes = false +) + +func marshalUint32(b []byte, v uint32) []byte { + return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +func marshalUint64(b []byte, v uint64) []byte { + return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v)) +} + +func marshalString(b []byte, v string) []byte { + return append(marshalUint32(b, uint32(len(v))), v...) 
+} + +func marshal(b []byte, v interface{}) []byte { + if v == nil { + return b + } + switch v := v.(type) { + case uint8: + return append(b, v) + case uint32: + return marshalUint32(b, v) + case uint64: + return marshalUint64(b, v) + case string: + return marshalString(b, v) + case os.FileInfo: + return marshalFileInfo(b, v) + default: + switch d := reflect.ValueOf(v); d.Kind() { + case reflect.Struct: + for i, n := 0, d.NumField(); i < n; i++ { + b = append(marshal(b, d.Field(i).Interface())) + } + return b + case reflect.Slice: + for i, n := 0, d.Len(); i < n; i++ { + b = append(marshal(b, d.Index(i).Interface())) + } + return b + default: + panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v)) + } + } +} + +func unmarshalUint32(b []byte) (uint32, []byte) { + v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + return v, b[4:] +} + +func unmarshalUint32Safe(b []byte) (uint32, []byte, error) { + var v uint32 + if len(b) < 4 { + return 0, nil, errShortPacket + } + v, b = unmarshalUint32(b) + return v, b, nil +} + +func unmarshalUint64(b []byte) (uint64, []byte) { + h, b := unmarshalUint32(b) + l, b := unmarshalUint32(b) + return uint64(h)<<32 | uint64(l), b +} + +func unmarshalUint64Safe(b []byte) (uint64, []byte, error) { + var v uint64 + if len(b) < 8 { + return 0, nil, errShortPacket + } + v, b = unmarshalUint64(b) + return v, b, nil +} + +func unmarshalString(b []byte) (string, []byte) { + n, b := unmarshalUint32(b) + return string(b[:n]), b[n:] +} + +func unmarshalStringSafe(b []byte) (string, []byte, error) { + n, b, err := unmarshalUint32Safe(b) + if err != nil { + return "", nil, err + } + if int64(n) > int64(len(b)) { + return "", nil, errShortPacket + } + return string(b[:n]), b[n:], nil +} + +// sendPacket marshals p according to RFC 4234. +func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error { + bb, err := m.MarshalBinary() + if err != nil { + return errors.Errorf("binary marshaller failed: %v", err) + } + if debugDumpTxPacketBytes { + debug("send packet: %s %d bytes %x", fxp(bb[0]), len(bb), bb[1:]) + } else if debugDumpTxPacket { + debug("send packet: %s %d bytes", fxp(bb[0]), len(bb)) + } + l := uint32(len(bb)) + hdr := []byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)} + _, err = w.Write(hdr) + if err != nil { + return errors.Errorf("failed to send packet header: %v", err) + } + _, err = w.Write(bb) + if err != nil { + return errors.Errorf("failed to send packet body: %v", err) + } + return nil +} + +func recvPacket(r io.Reader) (uint8, []byte, error) { + var b = []byte{0, 0, 0, 0} + if _, err := io.ReadFull(r, b); err != nil { + return 0, nil, err + } + l, _ := unmarshalUint32(b) + b = make([]byte, l) + if _, err := io.ReadFull(r, b); err != nil { + debug("recv packet %d bytes: err %v", l, err) + return 0, nil, err + } + if debugDumpRxPacketBytes { + debug("recv packet: %s %d bytes %x", fxp(b[0]), l, b[1:]) + } else if debugDumpRxPacket { + debug("recv packet: %s %d bytes", fxp(b[0]), l) + } + return b[0], b[1:], nil +} + +type extensionPair struct { + Name string + Data string +} + +func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) { + var ep extensionPair + var err error + ep.Name, b, err = unmarshalStringSafe(b) + if err != nil { + return ep, b, err + } + ep.Data, b, err = unmarshalStringSafe(b) + return ep, b, err +} + +// Here starts the definition of packets along with their MarshalBinary +// implementations. +// Manually writing the marshalling logic wins us a lot of time and +// allocation. 
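To make the hand-rolled encoding concrete, the sketch below reuses the two helpers defined above (copied verbatim) to show the wire form of a string field: a big-endian uint32 length followed by the raw bytes.

package main

import "fmt"

func marshalUint32(b []byte, v uint32) []byte {
	return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}

func marshalString(b []byte, v string) []byte {
	return append(marshalUint32(b, uint32(len(v))), v...)
}

func main() {
	b := marshalString(nil, "sftp")
	fmt.Printf("% x\n", b) // 00 00 00 04 73 66 74 70
}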
+ +type sshFxInitPacket struct { + Version uint32 + Extensions []extensionPair +} + +func (p sshFxInitPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 // byte + uint32 + for _, e := range p.Extensions { + l += 4 + len(e.Name) + 4 + len(e.Data) + } + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_INIT) + b = marshalUint32(b, p.Version) + for _, e := range p.Extensions { + b = marshalString(b, e.Name) + b = marshalString(b, e.Data) + } + return b, nil +} + +func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error { + var err error + if p.Version, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + for len(b) > 0 { + var ep extensionPair + ep, b, err = unmarshalExtensionPair(b) + if err != nil { + return err + } + p.Extensions = append(p.Extensions, ep) + } + return nil +} + +type sshFxVersionPacket struct { + Version uint32 + Extensions []struct { + Name, Data string + } +} + +func (p sshFxVersionPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 // byte + uint32 + for _, e := range p.Extensions { + l += 4 + len(e.Name) + 4 + len(e.Data) + } + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_VERSION) + b = marshalUint32(b, p.Version) + for _, e := range p.Extensions { + b = marshalString(b, e.Name) + b = marshalString(b, e.Data) + } + return b, nil +} + +func marshalIDString(packetType byte, id uint32, str string) ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(str) + + b := make([]byte, 0, l) + b = append(b, packetType) + b = marshalUint32(b, id) + b = marshalString(b, str) + return b, nil +} + +func unmarshalIDString(b []byte, id *uint32, str *string) error { + var err error + *id, b, err = unmarshalUint32Safe(b) + if err != nil { + return err + } + *str, _, err = unmarshalStringSafe(b) + return err +} + +type sshFxpReaddirPacket struct { + ID uint32 + Handle string +} + +func (p sshFxpReaddirPacket) id() uint32 { return p.ID } + +func (p sshFxpReaddirPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_READDIR, p.ID, p.Handle) +} + +func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpOpendirPacket struct { + ID uint32 + Path string +} + +func (p sshFxpOpendirPacket) id() uint32 { return p.ID } + +func (p sshFxpOpendirPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_OPENDIR, p.ID, p.Path) +} + +func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpLstatPacket struct { + ID uint32 + Path string +} + +func (p sshFxpLstatPacket) id() uint32 { return p.ID } + +func (p sshFxpLstatPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_LSTAT, p.ID, p.Path) +} + +func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpStatPacket struct { + ID uint32 + Path string +} + +func (p sshFxpStatPacket) id() uint32 { return p.ID } + +func (p sshFxpStatPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_STAT, p.ID, p.Path) +} + +func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpFstatPacket struct { + ID uint32 + Handle string +} + +func (p sshFxpFstatPacket) id() uint32 { return p.ID } + +func (p sshFxpFstatPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_FSTAT, p.ID, p.Handle) +} + +func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error { + return 
unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpClosePacket struct { + ID uint32 + Handle string +} + +func (p sshFxpClosePacket) id() uint32 { return p.ID } + +func (p sshFxpClosePacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_CLOSE, p.ID, p.Handle) +} + +func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpRemovePacket struct { + ID uint32 + Filename string +} + +func (p sshFxpRemovePacket) id() uint32 { return p.ID } + +func (p sshFxpRemovePacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_REMOVE, p.ID, p.Filename) +} + +func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Filename) +} + +type sshFxpRmdirPacket struct { + ID uint32 + Path string +} + +func (p sshFxpRmdirPacket) id() uint32 { return p.ID } + +func (p sshFxpRmdirPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_RMDIR, p.ID, p.Path) +} + +func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpSymlinkPacket struct { + ID uint32 + Targetpath string + Linkpath string +} + +func (p sshFxpSymlinkPacket) id() uint32 { return p.ID } + +func (p sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Targetpath) + + 4 + len(p.Linkpath) + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_SYMLINK) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Targetpath) + b = marshalString(b, p.Linkpath) + return b, nil +} + +func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Targetpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Linkpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpReadlinkPacket struct { + ID uint32 + Path string +} + +func (p sshFxpReadlinkPacket) id() uint32 { return p.ID } + +func (p sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_READLINK, p.ID, p.Path) +} + +func (p *sshFxpReadlinkPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpRealpathPacket struct { + ID uint32 + Path string +} + +func (p sshFxpRealpathPacket) id() uint32 { return p.ID } + +func (p sshFxpRealpathPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_REALPATH, p.ID, p.Path) +} + +func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpNameAttr struct { + Name string + LongName string + Attrs []interface{} +} + +func (p sshFxpNameAttr) MarshalBinary() ([]byte, error) { + b := []byte{} + b = marshalString(b, p.Name) + b = marshalString(b, p.LongName) + for _, attr := range p.Attrs { + b = marshal(b, attr) + } + return b, nil +} + +type sshFxpNamePacket struct { + ID uint32 + NameAttrs []sshFxpNameAttr +} + +func (p sshFxpNamePacket) MarshalBinary() ([]byte, error) { + b := []byte{} + b = append(b, ssh_FXP_NAME) + b = marshalUint32(b, p.ID) + b = marshalUint32(b, uint32(len(p.NameAttrs))) + for _, na := range p.NameAttrs { + ab, err := na.MarshalBinary() + if err != nil { + return nil, err + } + + b = append(b, ab...) 
+ } + return b, nil +} + +type sshFxpOpenPacket struct { + ID uint32 + Path string + Pflags uint32 + Flags uint32 // ignored +} + +func (p sshFxpOpenPacket) id() uint32 { return p.ID } + +func (p sshFxpOpenPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + + 4 + len(p.Path) + + 4 + 4 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_OPEN) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Pflags) + b = marshalUint32(b, p.Flags) + return b, nil +} + +func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpReadPacket struct { + ID uint32 + Handle string + Offset uint64 + Len uint32 +} + +func (p sshFxpReadPacket) id() uint32 { return p.ID } + +func (p sshFxpReadPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Handle) + + 8 + 4 // uint64 + uint32 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_READ) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint64(b, p.Offset) + b = marshalUint32(b, p.Len) + return b, nil +} + +func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { + return err + } else if p.Len, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpRenamePacket struct { + ID uint32 + Oldpath string + Newpath string +} + +func (p sshFxpRenamePacket) id() uint32 { return p.ID } + +func (p sshFxpRenamePacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Oldpath) + + 4 + len(p.Newpath) + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_RENAME) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Oldpath) + b = marshalString(b, p.Newpath) + return b, nil +} + +func (p *sshFxpRenamePacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpPosixRenamePacket struct { + ID uint32 + Oldpath string + Newpath string +} + +func (p sshFxpPosixRenamePacket) id() uint32 { return p.ID } + +func (p sshFxpPosixRenamePacket) MarshalBinary() ([]byte, error) { + const ext = "posix-rename@openssh.com" + l := 1 + 4 + // type(byte) + uint32 + 4 + len(ext) + + 4 + len(p.Oldpath) + + 4 + len(p.Newpath) + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_EXTENDED) + b = marshalUint32(b, p.ID) + b = marshalString(b, ext) + b = marshalString(b, p.Oldpath) + b = marshalString(b, p.Newpath) + return b, nil +} + +type sshFxpWritePacket struct { + ID uint32 + Handle string + Offset uint64 + Length uint32 + Data []byte +} + +func (p sshFxpWritePacket) id() uint32 { return p.ID } + +func (p sshFxpWritePacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Handle) + + 8 + 4 + // uint64 + uint32 + len(p.Data) + + b := make([]byte, 0, l) 
+ b = append(b, ssh_FXP_WRITE) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint64(b, p.Offset) + b = marshalUint32(b, p.Length) + b = append(b, p.Data...) + return b, nil +} + +func (p *sshFxpWritePacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { + return err + } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if uint32(len(b)) < p.Length { + return errShortPacket + } + + p.Data = append([]byte{}, b[:p.Length]...) + return nil +} + +type sshFxpMkdirPacket struct { + ID uint32 + Path string + Flags uint32 // ignored +} + +func (p sshFxpMkdirPacket) id() uint32 { return p.ID } + +func (p sshFxpMkdirPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Path) + + 4 // uint32 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_MKDIR) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Flags) + return b, nil +} + +func (p *sshFxpMkdirPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpSetstatPacket struct { + ID uint32 + Path string + Flags uint32 + Attrs interface{} +} + +type sshFxpFsetstatPacket struct { + ID uint32 + Handle string + Flags uint32 + Attrs interface{} +} + +func (p sshFxpSetstatPacket) id() uint32 { return p.ID } +func (p sshFxpFsetstatPacket) id() uint32 { return p.ID } + +func (p sshFxpSetstatPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Path) + + 4 // uint32 + uint64 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_SETSTAT) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Flags) + b = marshal(b, p.Attrs) + return b, nil +} + +func (p sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Handle) + + 4 // uint32 + uint64 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_FSETSTAT) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint32(b, p.Flags) + b = marshal(b, p.Attrs) + return b, nil +} + +func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + p.Attrs = b + return nil +} + +func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + p.Attrs = b + return nil +} + +type sshFxpHandlePacket struct { + ID uint32 + Handle string +} + +func (p sshFxpHandlePacket) MarshalBinary() ([]byte, error) { + b := []byte{ssh_FXP_HANDLE} + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + return b, nil +} + +type sshFxpStatusPacket struct { + ID uint32 + StatusError +} + +func (p 
sshFxpStatusPacket) MarshalBinary() ([]byte, error) { + b := []byte{ssh_FXP_STATUS} + b = marshalUint32(b, p.ID) + b = marshalStatus(b, p.StatusError) + return b, nil +} + +type sshFxpDataPacket struct { + ID uint32 + Length uint32 + Data []byte +} + +func (p sshFxpDataPacket) MarshalBinary() ([]byte, error) { + b := []byte{ssh_FXP_DATA} + b = marshalUint32(b, p.ID) + b = marshalUint32(b, p.Length) + b = append(b, p.Data[:p.Length]...) + return b, nil +} + +func (p *sshFxpDataPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if uint32(len(b)) < p.Length { + return errors.New("truncated packet") + } + + p.Data = make([]byte, p.Length) + copy(p.Data, b) + return nil +} + +type sshFxpStatvfsPacket struct { + ID uint32 + Path string +} + +func (p sshFxpStatvfsPacket) id() uint32 { return p.ID } + +func (p sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + len(p.Path) + + len("statvfs@openssh.com") + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_EXTENDED) + b = marshalUint32(b, p.ID) + b = marshalString(b, "statvfs@openssh.com") + b = marshalString(b, p.Path) + return b, nil +} + +// A StatVFS contains statistics about a filesystem. +type StatVFS struct { + ID uint32 + Bsize uint64 /* file system block size */ + Frsize uint64 /* fundamental fs block size */ + Blocks uint64 /* number of blocks (unit f_frsize) */ + Bfree uint64 /* free blocks in file system */ + Bavail uint64 /* free blocks for non-root */ + Files uint64 /* total file inodes */ + Ffree uint64 /* free file inodes */ + Favail uint64 /* free file inodes for to non-root */ + Fsid uint64 /* file system id */ + Flag uint64 /* bit mask of f_flag values */ + Namemax uint64 /* maximum filename length */ +} + +// TotalSpace calculates the amount of total space in a filesystem. +func (p *StatVFS) TotalSpace() uint64 { + return p.Frsize * p.Blocks +} + +// FreeSpace calculates the amount of free space in a filesystem. 
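Both space helpers are simple products of the fundamental block size. A small sketch with hypothetical values (Frsize 4096, Blocks 1<<20, Bfree 1<<18):

package main

import (
	"fmt"

	"github.com/pkg/sftp"
)

func main() {
	vfs := sftp.StatVFS{Frsize: 4096, Blocks: 1 << 20, Bfree: 1 << 18}
	fmt.Println(vfs.TotalSpace()) // 4294967296 (4096 * 1048576), i.e. 4 GiB
	fmt.Println(vfs.FreeSpace())  // 1073741824 (4096 * 262144), i.e. 1 GiB
}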
+func (p *StatVFS) FreeSpace() uint64 { + return p.Frsize * p.Bfree +} + +// Convert to ssh_FXP_EXTENDED_REPLY packet binary format +func (p *StatVFS) MarshalBinary() ([]byte, error) { + var buf bytes.Buffer + buf.Write([]byte{ssh_FXP_EXTENDED_REPLY}) + err := binary.Write(&buf, binary.BigEndian, p) + return buf.Bytes(), err +} + +type sshFxpExtendedPacket struct { + ID uint32 + ExtendedRequest string + SpecificPacket interface { + serverRespondablePacket + readonly() bool + } +} + +func (p sshFxpExtendedPacket) id() uint32 { return p.ID } +func (p sshFxpExtendedPacket) readonly() bool { return p.SpecificPacket.readonly() } + +func (p sshFxpExtendedPacket) respond(svr *Server) error { + return p.SpecificPacket.respond(svr) +} + +func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error { + var err error + bOrig := b + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, _, err = unmarshalStringSafe(b); err != nil { + return err + } + + // specific unmarshalling + switch p.ExtendedRequest { + case "statvfs@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketStatVFS{} + case "posix-rename@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketPosixRename{} + default: + return errUnknownExtendedPacket + } + + return p.SpecificPacket.UnmarshalBinary(bOrig) +} + +type sshFxpExtendedPacketStatVFS struct { + ID uint32 + ExtendedRequest string + Path string +} + +func (p sshFxpExtendedPacketStatVFS) id() uint32 { return p.ID } +func (p sshFxpExtendedPacketStatVFS) readonly() bool { return true } +func (p *sshFxpExtendedPacketStatVFS) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Path, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpExtendedPacketPosixRename struct { + ID uint32 + ExtendedRequest string + Oldpath string + Newpath string +} + +func (p sshFxpExtendedPacketPosixRename) id() uint32 { return p.ID } +func (p sshFxpExtendedPacketPosixRename) readonly() bool { return false } +func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +func (p sshFxpExtendedPacketPosixRename) respond(s *Server) error { + err := os.Rename(p.Oldpath, p.Newpath) + return s.sendError(p, err) +} diff --git a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go new file mode 100644 index 00000000..b695528f --- /dev/null +++ b/vendor/github.com/pkg/sftp/release.go @@ -0,0 +1,5 @@ +// +build !debug + +package sftp + +func debug(fmt string, args ...interface{}) {} diff --git a/vendor/github.com/pkg/sftp/request-attrs.go b/vendor/github.com/pkg/sftp/request-attrs.go new file mode 100644 index 00000000..0149d930 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-attrs.go @@ -0,0 +1,63 @@ +package sftp + +// Methods on the Request object to make working with the Flags bitmasks and +// Attr(ibutes) byte blob easier. Use Pflags() when working with an Open/Write +// request and AttrFlags() and Attributes() when working with SetStat requests. 
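A hedged sketch of those accessors from a handler's point of view; describe is a hypothetical helper, and r is the *sftp.Request a handler receives:

package example

import (
	"fmt"

	"github.com/pkg/sftp"
)

func describe(r *sftp.Request) {
	switch r.Method {
	case "Open", "Put":
		pf := r.Pflags() // decoded open pflags
		fmt.Println("append:", pf.Append, "truncate:", pf.Trunc)
	case "Setstat":
		af := r.AttrFlags(r.Flags) // which attributes the client wants changed
		if af.Permissions {
			fmt.Println("requested mode:", r.Attributes().FileMode())
		}
	}
}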
+ +import "os" + +// Open packet pflags +type pflags struct { + Read, Write, Append, Creat, Trunc, Excl bool +} + +// testable constructor +func newPflags(flags uint32) pflags { + return pflags{ + Read: flags&ssh_FXF_READ != 0, + Write: flags&ssh_FXF_WRITE != 0, + Append: flags&ssh_FXF_APPEND != 0, + Creat: flags&ssh_FXF_CREAT != 0, + Trunc: flags&ssh_FXF_TRUNC != 0, + Excl: flags&ssh_FXF_EXCL != 0, + } +} + +// Check bitmap/uint32 for Open packet pflag values +func (r *Request) Pflags() pflags { + return newPflags(r.Flags) +} + +// File attribute flags +type aflags struct { + Size, UidGid, Permissions, Acmodtime bool +} + +// testable constructor +func newAflags(flags uint32) aflags { + return aflags{ + Size: (flags & ssh_FILEXFER_ATTR_SIZE) != 0, + UidGid: (flags & ssh_FILEXFER_ATTR_UIDGID) != 0, + Permissions: (flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0, + Acmodtime: (flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0, + } +} + +// Check bitmap/uint32 for file attribute flags +func (r *Request) AttrFlags(flags uint32) aflags { + return newAflags(r.Flags) +} + +// File attributes +type fileattrs FileStat + +// Return Mode wrapped in os.FileMode +func (a fileattrs) FileMode() os.FileMode { + return os.FileMode(a.Mode) +} + +// Parse file attributes byte blob and return them in object +func (r *Request) Attributes() fileattrs { + fa, _ := getFileStat(r.Flags, r.Attrs) + return fileattrs(*fa) +} diff --git a/vendor/github.com/pkg/sftp/request-errors.go b/vendor/github.com/pkg/sftp/request-errors.go new file mode 100644 index 00000000..00451e74 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-errors.go @@ -0,0 +1,42 @@ +package sftp + +// Error types that match the SFTP's SSH_FXP_STATUS codes. Gives you more +// direct control of the errors being sent vs. letting the library work them +// out from the standard os/io errors. + +type fxerr uint32 + +const ( + ErrSshFxOk = fxerr(ssh_FX_OK) + ErrSshFxEof = fxerr(ssh_FX_EOF) + ErrSshFxNoSuchFile = fxerr(ssh_FX_NO_SUCH_FILE) + ErrSshFxPermissionDenied = fxerr(ssh_FX_PERMISSION_DENIED) + ErrSshFxFailure = fxerr(ssh_FX_FAILURE) + ErrSshFxBadMessage = fxerr(ssh_FX_BAD_MESSAGE) + ErrSshFxNoConnection = fxerr(ssh_FX_NO_CONNECTION) + ErrSshFxConnectionLost = fxerr(ssh_FX_CONNECTION_LOST) + ErrSshFxOpUnsupported = fxerr(ssh_FX_OP_UNSUPPORTED) +) + +func (e fxerr) Error() string { + switch e { + case ErrSshFxOk: + return "OK" + case ErrSshFxEof: + return "EOF" + case ErrSshFxNoSuchFile: + return "No Such File" + case ErrSshFxPermissionDenied: + return "Permission Denied" + case ErrSshFxBadMessage: + return "Bad Message" + case ErrSshFxNoConnection: + return "No Connection" + case ErrSshFxConnectionLost: + return "Connection Lost" + case ErrSshFxOpUnsupported: + return "Operation Unsupported" + default: + return "Failure" + } +} diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go new file mode 100644 index 00000000..96191e0d --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-example.go @@ -0,0 +1,261 @@ +package sftp + +// This serves as an example of how to implement the request server handler as +// well as a dummy backend for testing. It implements an in-memory backend that +// works as a very simple filesystem with simple flat key-value lookup system. 
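The in-memory backend doubles as a ready-made handler set, so wiring it into a request server is one call. A hypothetical sketch (serveInMem is not part of the package); rwc would typically be the ssh.Channel accepted for the "sftp" subsystem, as in the sftp-server example earlier in this patch, and RequestServer itself is defined in request-server.go below:

package example

import (
	"io"

	"github.com/pkg/sftp"
)

func serveInMem(rwc io.ReadWriteCloser) error {
	srv := sftp.NewRequestServer(rwc, sftp.InMemHandler())
	return srv.Serve() // blocks until the client disconnects or an error occurs
}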
+ +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "sync" + "time" +) + +// InMemHandler returns a Hanlders object with the test handlers +func InMemHandler() Handlers { + root := &root{ + files: make(map[string]*memFile), + } + root.memFile = newMemFile("/", true) + return Handlers{root, root, root, root} +} + +// Handlers +func (fs *root) Fileread(r *Request) (io.ReaderAt, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + file, err := fs.fetch(r.Filepath) + if err != nil { + return nil, err + } + if file.symlink != "" { + file, err = fs.fetch(file.symlink) + if err != nil { + return nil, err + } + } + return file.ReaderAt() +} + +func (fs *root) Filewrite(r *Request) (io.WriterAt, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + file, err := fs.fetch(r.Filepath) + if err == os.ErrNotExist { + dir, err := fs.fetch(filepath.Dir(r.Filepath)) + if err != nil { + return nil, err + } + if !dir.isdir { + return nil, os.ErrInvalid + } + file = newMemFile(r.Filepath, false) + fs.files[r.Filepath] = file + } + return file.WriterAt() +} + +func (fs *root) Filecmd(r *Request) error { + if fs.mockErr != nil { + return fs.mockErr + } + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + switch r.Method { + case "Setstat": + return nil + case "Rename": + file, err := fs.fetch(r.Filepath) + if err != nil { + return err + } + if _, ok := fs.files[r.Target]; ok { + return &os.LinkError{Op: "rename", Old: r.Filepath, New: r.Target, + Err: fmt.Errorf("dest file exists")} + } + fs.files[r.Target] = file + delete(fs.files, r.Filepath) + case "Rmdir", "Remove": + _, err := fs.fetch(filepath.Dir(r.Filepath)) + if err != nil { + return err + } + delete(fs.files, r.Filepath) + case "Mkdir": + _, err := fs.fetch(filepath.Dir(r.Filepath)) + if err != nil { + return err + } + fs.files[r.Filepath] = newMemFile(r.Filepath, true) + case "Symlink": + _, err := fs.fetch(r.Filepath) + if err != nil { + return err + } + link := newMemFile(r.Target, false) + link.symlink = r.Filepath + fs.files[r.Target] = link + } + return nil +} + +type listerat []os.FileInfo + +// Modeled after strings.Reader's ReadAt() implementation +func (f listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) { + var n int + if offset >= int64(len(f)) { + return 0, io.EOF + } + n = copy(ls, f[offset:]) + if n < len(ls) { + return n, io.EOF + } + return n, nil +} + +func (fs *root) Filelist(r *Request) (ListerAt, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + + switch r.Method { + case "List": + ordered_names := []string{} + for fn, _ := range fs.files { + if filepath.Dir(fn) == r.Filepath { + ordered_names = append(ordered_names, fn) + } + } + sort.Strings(ordered_names) + list := make([]os.FileInfo, len(ordered_names)) + for i, fn := range ordered_names { + list[i] = fs.files[fn] + } + return listerat(list), nil + case "Stat": + file, err := fs.fetch(r.Filepath) + if err != nil { + return nil, err + } + return listerat([]os.FileInfo{file}), nil + case "Readlink": + file, err := fs.fetch(r.Filepath) + if err != nil { + return nil, err + } + if file.symlink != "" { + file, err = fs.fetch(file.symlink) + if err != nil { + return nil, err + } + } + return listerat([]os.FileInfo{file}), nil + } + return nil, nil +} + +// In memory file-system-y thing that the Hanlders live on +type root struct { + *memFile + files 
map[string]*memFile + filesLock sync.Mutex + mockErr error +} + +// Set a mocked error that the next handler call will return. +// Set to nil to reset for no error. +func (fs *root) returnErr(err error) { + fs.mockErr = err +} + +func (fs *root) fetch(path string) (*memFile, error) { + if path == "/" { + return fs.memFile, nil + } + if file, ok := fs.files[path]; ok { + return file, nil + } + return nil, os.ErrNotExist +} + +// Implements os.FileInfo, Reader and Writer interfaces. +// These are the 3 interfaces necessary for the Handlers. +type memFile struct { + name string + modtime time.Time + symlink string + isdir bool + content []byte + contentLock sync.RWMutex +} + +// factory to make sure modtime is set +func newMemFile(name string, isdir bool) *memFile { + return &memFile{ + name: name, + modtime: time.Now(), + isdir: isdir, + } +} + +// Have memFile fulfill os.FileInfo interface +func (f *memFile) Name() string { return filepath.Base(f.name) } +func (f *memFile) Size() int64 { return int64(len(f.content)) } +func (f *memFile) Mode() os.FileMode { + ret := os.FileMode(0644) + if f.isdir { + ret = os.FileMode(0755) | os.ModeDir + } + if f.symlink != "" { + ret = os.FileMode(0777) | os.ModeSymlink + } + return ret +} +func (f *memFile) ModTime() time.Time { return f.modtime } +func (f *memFile) IsDir() bool { return f.isdir } +func (f *memFile) Sys() interface{} { + return fakeFileInfoSys() +} + +// Read/Write +func (f *memFile) ReaderAt() (io.ReaderAt, error) { + if f.isdir { + return nil, os.ErrInvalid + } + return bytes.NewReader(f.content), nil +} + +func (f *memFile) WriterAt() (io.WriterAt, error) { + if f.isdir { + return nil, os.ErrInvalid + } + return f, nil +} +func (f *memFile) WriteAt(p []byte, off int64) (int, error) { + // fmt.Println(string(p), off) + // mimic write delays, should be optional + time.Sleep(time.Microsecond * time.Duration(len(p))) + f.contentLock.Lock() + defer f.contentLock.Unlock() + plen := len(p) + int(off) + if plen >= len(f.content) { + nc := make([]byte, plen) + copy(nc, f.content) + f.content = nc + } + copy(f.content[off:], p) + return len(p), nil +} diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go new file mode 100644 index 00000000..44b8da10 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-interfaces.go @@ -0,0 +1,46 @@ +package sftp + +import ( + "io" + "os" +) + +// Interfaces are differentiated based on required returned values. +// All input arguments are to be pulled from Request (the only arg). + +// FileReader should return an io.ReaderAt for the filepath +// Note in cases of an error, the error text will be sent to the client. +type FileReader interface { + Fileread(*Request) (io.ReaderAt, error) +} + +// FileWriter should return an io.WriterAt for the filepath. +// +// The request server code will call Close() on the returned io.WriterAt +// ojbect if an io.Closer type assertion succeeds. +// Note in cases of an error, the error text will be sent to the client. +type FileWriter interface { + Filewrite(*Request) (io.WriterAt, error) +} + +// FileCmder should return an error +// Note in cases of an error, the error text will be sent to the client. +type FileCmder interface { + Filecmd(*Request) error +} + +// FileLister should return an object that fulfils the ListerAt interface +// Note in cases of an error, the error text will be sent to the client. 
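A minimal sketch of the FileLister contract described above together with the ListerAt contract documented just below; staticLister and sliceLister are hypothetical names, and sliceLister mirrors the listerat helper in request-example.go above:

package example

import (
	"io"
	"os"

	"github.com/pkg/sftp"
)

// staticLister is a FileLister that always returns the same entries.
type staticLister struct{ entries []os.FileInfo }

func (s staticLister) Filelist(r *sftp.Request) (sftp.ListerAt, error) {
	return sliceLister(s.entries), nil
}

// sliceLister satisfies ListerAt over an in-memory slice.
type sliceLister []os.FileInfo

func (l sliceLister) ListAt(ls []os.FileInfo, offset int64) (int, error) {
	if offset >= int64(len(l)) {
		return 0, io.EOF
	}
	n := copy(ls, l[offset:])
	if n < len(ls) {
		return n, io.EOF // fewer entries copied than requested: end of list
	}
	return n, nil
}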
+type FileLister interface { + Filelist(*Request) (ListerAt, error) +} + +// ListerAt does for file lists what io.ReaderAt does for files. +// ListAt should return the number of entries copied and an io.EOF +// error if at end of list. This is testable by comparing how many you +// copied to how many could be copied (eg. n < len(ls) below). +// The copy() builtin is best for the copying. +// Note in cases of an error, the error text will be sent to the client. +type ListerAt interface { + ListAt([]os.FileInfo, int64) (int, error) +} diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go new file mode 100644 index 00000000..ec64a1ff --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-server.go @@ -0,0 +1,238 @@ +package sftp + +import ( + "context" + "encoding" + "io" + "path" + "path/filepath" + "strconv" + "sync" + "syscall" + + "github.com/pkg/errors" +) + +var maxTxPacket uint32 = 1 << 15 + +// Handlers contains the 4 SFTP server request handlers. +type Handlers struct { + FileGet FileReader + FilePut FileWriter + FileCmd FileCmder + FileList FileLister +} + +// RequestServer abstracts the sftp protocol with an http request-like protocol +type RequestServer struct { + *serverConn + Handlers Handlers + pktMgr *packetManager + openRequests map[string]*Request + openRequestLock sync.RWMutex + handleCount int +} + +// NewRequestServer creates/allocates/returns new RequestServer. +// Normally there there will be one server per user-session. +func NewRequestServer(rwc io.ReadWriteCloser, h Handlers) *RequestServer { + svrConn := &serverConn{ + conn: conn{ + Reader: rwc, + WriteCloser: rwc, + }, + } + return &RequestServer{ + serverConn: svrConn, + Handlers: h, + pktMgr: newPktMgr(svrConn), + openRequests: make(map[string]*Request), + } +} + +// New Open packet/Request +func (rs *RequestServer) nextRequest(r *Request) string { + rs.openRequestLock.Lock() + defer rs.openRequestLock.Unlock() + rs.handleCount++ + handle := strconv.Itoa(rs.handleCount) + rs.openRequests[handle] = r + return handle +} + +// Returns Request from openRequests, bool is false if it is missing +// If the method is different, save/return a new Request w/ that Method. +// +// The Requests in openRequests work essentially as open file descriptors that +// you can do different things with. What you are doing with it are denoted by +// the first packet of that type (read/write/etc). We create a new Request when +// it changes to set the request.Method attribute in a thread safe way. 
+func (rs *RequestServer) getRequest(handle, method string) (*Request, bool) { + rs.openRequestLock.RLock() + r, ok := rs.openRequests[handle] + rs.openRequestLock.RUnlock() + if !ok || r.Method == method { + return r, ok + } + // if we make it here we need to replace the request + rs.openRequestLock.Lock() + defer rs.openRequestLock.Unlock() + r, ok = rs.openRequests[handle] + if !ok || r.Method == method { // re-check needed b/c lock race + return r, ok + } + r = r.copy() + r.Method = method + rs.openRequests[handle] = r + return r, ok +} + +func (rs *RequestServer) closeRequest(handle string) error { + rs.openRequestLock.Lock() + defer rs.openRequestLock.Unlock() + if r, ok := rs.openRequests[handle]; ok { + delete(rs.openRequests, handle) + return r.close() + } + return syscall.EBADF +} + +// Close the read/write/closer to trigger exiting the main server loop +func (rs *RequestServer) Close() error { return rs.conn.Close() } + +// Serve requests for user session +func (rs *RequestServer) Serve() error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var wg sync.WaitGroup + runWorker := func(ch requestChan) { + wg.Add(1) + go func() { + defer wg.Done() + if err := rs.packetWorker(ctx, ch); err != nil { + rs.conn.Close() // shuts down recvPacket + } + }() + } + pktChan := rs.pktMgr.workerChan(runWorker) + + var err error + var pkt requestPacket + var pktType uint8 + var pktBytes []byte + for { + pktType, pktBytes, err = rs.recvPacket() + if err != nil { + break + } + + pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) + if err != nil { + debug("makePacket err: %v", err) + rs.conn.Close() // shuts down recvPacket + break + } + + pktChan <- pkt + } + + close(pktChan) // shuts down sftpServerWorkers + wg.Wait() // wait for all workers to exit + + // make sure all open requests are properly closed + // (eg. possible on dropped connections, client crashes, etc.) 
+ for handle, req := range rs.openRequests { + delete(rs.openRequests, handle) + req.close() + } + + return err +} + +func (rs *RequestServer) packetWorker( + ctx context.Context, pktChan chan requestPacket, +) error { + for pkt := range pktChan { + var rpkt responsePacket + switch pkt := pkt.(type) { + case *sshFxInitPacket: + rpkt = sshFxVersionPacket{sftpProtocolVersion, nil} + case *sshFxpClosePacket: + handle := pkt.getHandle() + rpkt = statusFromError(pkt, rs.closeRequest(handle)) + case *sshFxpRealpathPacket: + rpkt = cleanPacketPath(pkt) + case *sshFxpOpendirPacket: + request := requestFromPacket(ctx, pkt) + handle := rs.nextRequest(request) + rpkt = sshFxpHandlePacket{pkt.id(), handle} + case *sshFxpOpenPacket: + request := requestFromPacket(ctx, pkt) + handle := rs.nextRequest(request) + rpkt = sshFxpHandlePacket{pkt.id(), handle} + if pkt.hasPflags(ssh_FXF_CREAT) { + if p := request.call(rs.Handlers, pkt); !isOk(p) { + rpkt = p // if error in write, return it + } + } + case hasHandle: + handle := pkt.getHandle() + request, ok := rs.getRequest(handle, requestMethod(pkt)) + if !ok { + rpkt = statusFromError(pkt, syscall.EBADF) + } else { + rpkt = request.call(rs.Handlers, pkt) + } + case hasPath: + request := requestFromPacket(ctx, pkt) + rpkt = request.call(rs.Handlers, pkt) + request.close() + default: + return errors.Errorf("unexpected packet type %T", pkt) + } + + err := rs.sendPacket(rpkt) + if err != nil { + return err + } + } + return nil +} + +// True is responsePacket is an OK status packet +func isOk(rpkt responsePacket) bool { + p, ok := rpkt.(sshFxpStatusPacket) + return ok && p.StatusError.Code == ssh_FX_OK +} + +// clean and return name packet for file +func cleanPacketPath(pkt *sshFxpRealpathPacket) responsePacket { + path := cleanPath(pkt.getPath()) + return &sshFxpNamePacket{ + ID: pkt.id(), + NameAttrs: []sshFxpNameAttr{{ + Name: path, + LongName: path, + Attrs: emptyFileStat, + }}, + } +} + +// Makes sure we have a clean POSIX (/) absolute path to work with +func cleanPath(p string) string { + p = filepath.ToSlash(p) + if !filepath.IsAbs(p) { + p = "/" + p + } + return path.Clean(p) +} + +// Wrap underlying connection methods to use packetManager +func (rs *RequestServer) sendPacket(m encoding.BinaryMarshaler) error { + if pkt, ok := m.(responsePacket); ok { + rs.pktMgr.readyPacket(pkt) + } else { + return errors.Errorf("unexpected packet type %T", m) + } + return nil +} diff --git a/vendor/github.com/pkg/sftp/request-unix.go b/vendor/github.com/pkg/sftp/request-unix.go new file mode 100644 index 00000000..a71a8980 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-unix.go @@ -0,0 +1,23 @@ +// +build !windows + +package sftp + +import ( + "errors" + "syscall" +) + +func fakeFileInfoSys() interface{} { + return &syscall.Stat_t{Uid: 65534, Gid: 65534} +} + +func testOsSys(sys interface{}) error { + fstat := sys.(*FileStat) + if fstat.UID != uint32(65534) { + return errors.New("Uid failed to match.") + } + if fstat.GID != uint32(65534) { + return errors.New("Gid failed to match:") + } + return nil +} diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go new file mode 100644 index 00000000..d74d6fa3 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request.go @@ -0,0 +1,353 @@ +package sftp + +import ( + "context" + "io" + "os" + "path" + "path/filepath" + "sync" + "syscall" + + "github.com/pkg/errors" +) + +// MaxFilelist is the max number of files to return in a readdir batch. 
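Because requests are plain values, handlers can also be exercised directly in tests without a live connection. A hypothetical sketch using NewRequest from this file (statRoot is not part of the package):

package example

import (
	"io"
	"os"

	"github.com/pkg/sftp"
)

// statRoot drives a FileLister with a hand-built Stat request for "/".
func statRoot(h sftp.FileLister) (os.FileInfo, error) {
	r := sftp.NewRequest("Stat", "/")
	lister, err := h.Filelist(r)
	if err != nil {
		return nil, err
	}
	fis := make([]os.FileInfo, 1)
	n, err := lister.ListAt(fis, 0)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if n == 0 {
		return nil, os.ErrNotExist
	}
	return fis[0], nil
}

For instance, statRoot(sftp.InMemHandler().FileList) would return the root entry of the in-memory backend above.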
+var MaxFilelist int64 = 100 + +// Request contains the data and state for the incoming service request. +type Request struct { + // Get, Put, Setstat, Stat, Rename, Remove + // Rmdir, Mkdir, List, Readlink, Symlink + Method string + Filepath string + Flags uint32 + Attrs []byte // convert to sub-struct + Target string // for renames and sym-links + // reader/writer/readdir from handlers + state state + // context lasts duration of request + ctx context.Context + cancelCtx context.CancelFunc +} + +type state struct { + *sync.RWMutex + writerAt io.WriterAt + readerAt io.ReaderAt + listerAt ListerAt + lsoffset int64 +} + +// New Request initialized based on packet data +func requestFromPacket(ctx context.Context, pkt hasPath) *Request { + method := requestMethod(pkt) + request := NewRequest(method, pkt.getPath()) + request.ctx, request.cancelCtx = context.WithCancel(ctx) + + switch p := pkt.(type) { + case *sshFxpOpenPacket: + request.Flags = p.Pflags + case *sshFxpSetstatPacket: + request.Flags = p.Flags + request.Attrs = p.Attrs.([]byte) + case *sshFxpRenamePacket: + request.Target = cleanPath(p.Newpath) + case *sshFxpSymlinkPacket: + request.Target = cleanPath(p.Linkpath) + } + return request +} + +// NewRequest creates a new Request object. +func NewRequest(method, path string) *Request { + return &Request{Method: method, Filepath: cleanPath(path), + state: state{RWMutex: new(sync.RWMutex)}} +} + +// shallow copy of existing request +func (r *Request) copy() *Request { + r.state.Lock() + defer r.state.Unlock() + r2 := new(Request) + *r2 = *r + return r2 +} + +// Context returns the request's context. To change the context, +// use WithContext. +// +// The returned context is always non-nil; it defaults to the +// background context. +// +// For incoming server requests, the context is canceled when the +// request is complete or the client's connection closes. +func (r *Request) Context() context.Context { + if r.ctx != nil { + return r.ctx + } + return context.Background() +} + +// WithContext returns a copy of r with its context changed to ctx. +// The provided ctx must be non-nil. 
+func (r *Request) WithContext(ctx context.Context) *Request { + if ctx == nil { + panic("nil context") + } + r2 := r.copy() + r2.ctx = ctx + r2.cancelCtx = nil + return r2 +} + +// Returns current offset for file list +func (r *Request) lsNext() int64 { + r.state.RLock() + defer r.state.RUnlock() + return r.state.lsoffset +} + +// Increases next offset +func (r *Request) lsInc(offset int64) { + r.state.Lock() + defer r.state.Unlock() + r.state.lsoffset = r.state.lsoffset + offset +} + +// manage file read/write state +func (r *Request) setWriterState(wa io.WriterAt) { + r.state.Lock() + defer r.state.Unlock() + r.state.writerAt = wa +} +func (r *Request) setReaderState(ra io.ReaderAt) { + r.state.Lock() + defer r.state.Unlock() + r.state.readerAt = ra +} +func (r *Request) setListerState(la ListerAt) { + r.state.Lock() + defer r.state.Unlock() + r.state.listerAt = la +} + +func (r *Request) getWriter() io.WriterAt { + r.state.RLock() + defer r.state.RUnlock() + return r.state.writerAt +} + +func (r *Request) getReader() io.ReaderAt { + r.state.RLock() + defer r.state.RUnlock() + return r.state.readerAt +} + +func (r *Request) getLister() ListerAt { + r.state.RLock() + defer r.state.RUnlock() + return r.state.listerAt +} + +// Close reader/writer if possible +func (r *Request) close() error { + if r.cancelCtx != nil { + r.cancelCtx() + } + rd := r.getReader() + if c, ok := rd.(io.Closer); ok { + return c.Close() + } + wt := r.getWriter() + if c, ok := wt.(io.Closer); ok { + return c.Close() + } + return nil +} + +// called from worker to handle packet/request +func (r *Request) call(handlers Handlers, pkt requestPacket) responsePacket { + switch r.Method { + case "Get": + return fileget(handlers.FileGet, r, pkt) + case "Put", "Open": + return fileput(handlers.FilePut, r, pkt) + case "Setstat", "Rename", "Rmdir", "Mkdir", "Symlink", "Remove": + return filecmd(handlers.FileCmd, r, pkt) + case "List", "Stat", "Readlink": + return filelist(handlers.FileList, r, pkt) + default: + return statusFromError(pkt, + errors.Errorf("unexpected method: %s", r.Method)) + } +} + +// file data for additional read/write packets +func packetData(p requestPacket) (data []byte, offset int64, length uint32) { + switch p := p.(type) { + case *sshFxpReadPacket: + length = p.Len + offset = int64(p.Offset) + case *sshFxpWritePacket: + data = p.Data + length = p.Length + offset = int64(p.Offset) + } + return +} + +// wrap FileReader handler +func fileget(h FileReader, r *Request, pkt requestPacket) responsePacket { + var err error + reader := r.getReader() + if reader == nil { + reader, err = h.Fileread(r) + if err != nil { + return statusFromError(pkt, err) + } + r.setReaderState(reader) + } + + _, offset, length := packetData(pkt) + data := make([]byte, clamp(length, maxTxPacket)) + n, err := reader.ReadAt(data, offset) + // only return EOF erro if no data left to read + if err != nil && (err != io.EOF || n == 0) { + return statusFromError(pkt, err) + } + return &sshFxpDataPacket{ + ID: pkt.id(), + Length: uint32(n), + Data: data[:n], + } +} + +// wrap FileWriter handler +func fileput(h FileWriter, r *Request, pkt requestPacket) responsePacket { + var err error + writer := r.getWriter() + if writer == nil { + writer, err = h.Filewrite(r) + if err != nil { + return statusFromError(pkt, err) + } + r.setWriterState(writer) + } + + data, offset, _ := packetData(pkt) + _, err = writer.WriteAt(data, offset) + return statusFromError(pkt, err) +} + +// wrap FileCmder handler +func filecmd(h FileCmder, r *Request, pkt 
requestPacket) responsePacket { + err := h.Filecmd(r) + return statusFromError(pkt, err) +} + +// wrap FileLister handler +func filelist(h FileLister, r *Request, pkt requestPacket) responsePacket { + var err error + lister := r.getLister() + if lister == nil { + lister, err = h.Filelist(r) + if err != nil { + return statusFromError(pkt, err) + } + r.setListerState(lister) + } + + offset := r.lsNext() + finfo := make([]os.FileInfo, MaxFilelist) + n, err := lister.ListAt(finfo, offset) + r.lsInc(int64(n)) + // ignore EOF as we only return it when there are no results + finfo = finfo[:n] // avoid need for nil tests below + + switch r.Method { + case "List": + if err != nil && err != io.EOF { + return statusFromError(pkt, err) + } + if n == 0 { + return statusFromError(pkt, io.EOF) + } + dirname := filepath.ToSlash(path.Base(r.Filepath)) + ret := &sshFxpNamePacket{ID: pkt.id()} + + for _, fi := range finfo { + ret.NameAttrs = append(ret.NameAttrs, sshFxpNameAttr{ + Name: fi.Name(), + LongName: runLs(dirname, fi), + Attrs: []interface{}{fi}, + }) + } + return ret + case "Stat": + if err != nil && err != io.EOF { + return statusFromError(pkt, err) + } + if n == 0 { + err = &os.PathError{Op: "stat", Path: r.Filepath, + Err: syscall.ENOENT} + return statusFromError(pkt, err) + } + return &sshFxpStatResponse{ + ID: pkt.id(), + info: finfo[0], + } + case "Readlink": + if err != nil && err != io.EOF { + return statusFromError(pkt, err) + } + if n == 0 { + err = &os.PathError{Op: "readlink", Path: r.Filepath, + Err: syscall.ENOENT} + return statusFromError(pkt, err) + } + filename := finfo[0].Name() + return &sshFxpNamePacket{ + ID: pkt.id(), + NameAttrs: []sshFxpNameAttr{{ + Name: filename, + LongName: filename, + Attrs: emptyFileStat, + }}, + } + default: + err = errors.Errorf("unexpected method: %s", r.Method) + return statusFromError(pkt, err) + } +} + +// init attributes of request object from packet data +func requestMethod(p requestPacket) (method string) { + switch p.(type) { + case *sshFxpReadPacket: + method = "Get" + case *sshFxpWritePacket: + method = "Put" + case *sshFxpReaddirPacket: + method = "List" + case *sshFxpOpenPacket, *sshFxpOpendirPacket: + method = "Open" + case *sshFxpSetstatPacket, *sshFxpFsetstatPacket: + method = "Setstat" + case *sshFxpRenamePacket: + method = "Rename" + case *sshFxpSymlinkPacket: + method = "Symlink" + case *sshFxpRemovePacket: + method = "Remove" + case *sshFxpStatPacket, *sshFxpLstatPacket, *sshFxpFstatPacket: + method = "Stat" + case *sshFxpRmdirPacket: + method = "Rmdir" + case *sshFxpReadlinkPacket: + method = "Readlink" + case *sshFxpMkdirPacket: + method = "Mkdir" + } + return method +} diff --git a/vendor/github.com/pkg/sftp/request_windows.go b/vendor/github.com/pkg/sftp/request_windows.go new file mode 100644 index 00000000..94d306b6 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request_windows.go @@ -0,0 +1,11 @@ +package sftp + +import "syscall" + +func fakeFileInfoSys() interface{} { + return syscall.Win32FileAttributeData{} +} + +func testOsSys(sys interface{}) error { + return nil +} diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go new file mode 100644 index 00000000..16678d1f --- /dev/null +++ b/vendor/github.com/pkg/sftp/server.go @@ -0,0 +1,675 @@ +package sftp + +// sftp server counterpart + +import ( + "encoding" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "time" + + "github.com/pkg/errors" +) + +const ( + SftpServerWorkerCount = 8 +) + +// Server 
is an SSH File Transfer Protocol (sftp) server. +// This is intended to provide the sftp subsystem to an ssh server daemon. +// This implementation currently supports most of sftp server protocol version 3, +// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +type Server struct { + *serverConn + debugStream io.Writer + readOnly bool + pktMgr *packetManager + openFiles map[string]*os.File + openFilesLock sync.RWMutex + handleCount int + maxTxPacket uint32 +} + +func (svr *Server) nextHandle(f *os.File) string { + svr.openFilesLock.Lock() + defer svr.openFilesLock.Unlock() + svr.handleCount++ + handle := strconv.Itoa(svr.handleCount) + svr.openFiles[handle] = f + return handle +} + +func (svr *Server) closeHandle(handle string) error { + svr.openFilesLock.Lock() + defer svr.openFilesLock.Unlock() + if f, ok := svr.openFiles[handle]; ok { + delete(svr.openFiles, handle) + return f.Close() + } + + return syscall.EBADF +} + +func (svr *Server) getHandle(handle string) (*os.File, bool) { + svr.openFilesLock.RLock() + defer svr.openFilesLock.RUnlock() + f, ok := svr.openFiles[handle] + return f, ok +} + +type serverRespondablePacket interface { + encoding.BinaryUnmarshaler + id() uint32 + respond(svr *Server) error +} + +// NewServer creates a new Server instance around the provided streams, serving +// content from the root of the filesystem. Optionally, ServerOption +// functions may be specified to further configure the Server. +// +// A subsequent call to Serve() is required to begin serving files over SFTP. +func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) { + svrConn := &serverConn{ + conn: conn{ + Reader: rwc, + WriteCloser: rwc, + }, + } + s := &Server{ + serverConn: svrConn, + debugStream: ioutil.Discard, + pktMgr: newPktMgr(svrConn), + openFiles: make(map[string]*os.File), + maxTxPacket: 1 << 15, + } + + for _, o := range options { + if err := o(s); err != nil { + return nil, err + } + } + + return s, nil +} + +// A ServerOption is a function which applies configuration to a Server. +type ServerOption func(*Server) error + +// WithDebug enables Server debugging output to the supplied io.Writer. +func WithDebug(w io.Writer) ServerOption { + return func(s *Server) error { + s.debugStream = w + return nil + } +} + +// ReadOnly configures a Server to serve files in read-only mode. 
+func ReadOnly() ServerOption { + return func(s *Server) error { + s.readOnly = true + return nil + } +} + +type rxPacket struct { + pktType fxp + pktBytes []byte +} + +// Up to N parallel servers +func (svr *Server) sftpServerWorker(pktChan chan requestPacket) error { + for pkt := range pktChan { + + // readonly checks + readonly := true + switch pkt := pkt.(type) { + case notReadOnly: + readonly = false + case *sshFxpOpenPacket: + readonly = pkt.readonly() + case *sshFxpExtendedPacket: + readonly = pkt.SpecificPacket.readonly() + } + + // If server is operating read-only and a write operation is requested, + // return permission denied + if !readonly && svr.readOnly { + if err := svr.sendError(pkt, syscall.EPERM); err != nil { + return errors.Wrap(err, "failed to send read only packet response") + } + continue + } + + if err := handlePacket(svr, pkt); err != nil { + return err + } + } + return nil +} + +func handlePacket(s *Server, p interface{}) error { + switch p := p.(type) { + case *sshFxInitPacket: + return s.sendPacket(sshFxVersionPacket{sftpProtocolVersion, nil}) + case *sshFxpStatPacket: + // stat the requested file + info, err := os.Stat(p.Path) + if err != nil { + return s.sendError(p, err) + } + return s.sendPacket(sshFxpStatResponse{ + ID: p.ID, + info: info, + }) + case *sshFxpLstatPacket: + // stat the requested file + info, err := os.Lstat(p.Path) + if err != nil { + return s.sendError(p, err) + } + return s.sendPacket(sshFxpStatResponse{ + ID: p.ID, + info: info, + }) + case *sshFxpFstatPacket: + f, ok := s.getHandle(p.Handle) + if !ok { + return s.sendError(p, syscall.EBADF) + } + + info, err := f.Stat() + if err != nil { + return s.sendError(p, err) + } + + return s.sendPacket(sshFxpStatResponse{ + ID: p.ID, + info: info, + }) + case *sshFxpMkdirPacket: + // TODO FIXME: ignore flags field + err := os.Mkdir(p.Path, 0755) + return s.sendError(p, err) + case *sshFxpRmdirPacket: + err := os.Remove(p.Path) + return s.sendError(p, err) + case *sshFxpRemovePacket: + err := os.Remove(p.Filename) + return s.sendError(p, err) + case *sshFxpRenamePacket: + err := os.Rename(p.Oldpath, p.Newpath) + return s.sendError(p, err) + case *sshFxpSymlinkPacket: + err := os.Symlink(p.Targetpath, p.Linkpath) + return s.sendError(p, err) + case *sshFxpClosePacket: + return s.sendError(p, s.closeHandle(p.Handle)) + case *sshFxpReadlinkPacket: + f, err := os.Readlink(p.Path) + if err != nil { + return s.sendError(p, err) + } + + return s.sendPacket(sshFxpNamePacket{ + ID: p.ID, + NameAttrs: []sshFxpNameAttr{{ + Name: f, + LongName: f, + Attrs: emptyFileStat, + }}, + }) + + case *sshFxpRealpathPacket: + f, err := filepath.Abs(p.Path) + if err != nil { + return s.sendError(p, err) + } + f = cleanPath(f) + return s.sendPacket(sshFxpNamePacket{ + ID: p.ID, + NameAttrs: []sshFxpNameAttr{{ + Name: f, + LongName: f, + Attrs: emptyFileStat, + }}, + }) + case *sshFxpOpendirPacket: + return sshFxpOpenPacket{ + ID: p.ID, + Path: p.Path, + Pflags: ssh_FXF_READ, + }.respond(s) + case *sshFxpReadPacket: + f, ok := s.getHandle(p.Handle) + if !ok { + return s.sendError(p, syscall.EBADF) + } + + data := make([]byte, clamp(p.Len, s.maxTxPacket)) + n, err := f.ReadAt(data, int64(p.Offset)) + if err != nil && (err != io.EOF || n == 0) { + return s.sendError(p, err) + } + return s.sendPacket(sshFxpDataPacket{ + ID: p.ID, + Length: uint32(n), + Data: data[:n], + }) + case *sshFxpWritePacket: + f, ok := s.getHandle(p.Handle) + if !ok { + return s.sendError(p, syscall.EBADF) + } + + _, err := f.WriteAt(p.Data, 
int64(p.Offset)) + return s.sendError(p, err) + case serverRespondablePacket: + err := p.respond(s) + return errors.Wrap(err, "pkt.respond failed") + default: + return errors.Errorf("unexpected packet type %T", p) + } +} + +// Serve serves SFTP connections until the streams stop or the SFTP subsystem +// is stopped. +func (svr *Server) Serve() error { + var wg sync.WaitGroup + runWorker := func(ch requestChan) { + wg.Add(1) + go func() { + defer wg.Done() + if err := svr.sftpServerWorker(ch); err != nil { + svr.conn.Close() // shuts down recvPacket + } + }() + } + pktChan := svr.pktMgr.workerChan(runWorker) + + var err error + var pkt requestPacket + var pktType uint8 + var pktBytes []byte + for { + pktType, pktBytes, err = svr.recvPacket() + if err != nil { + break + } + + pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) + if err != nil { + debug("makePacket err: %v", err) + svr.conn.Close() // shuts down recvPacket + break + } + + pktChan <- pkt + } + + close(pktChan) // shuts down sftpServerWorkers + wg.Wait() // wait for all workers to exit + + // close any still-open files + for handle, file := range svr.openFiles { + fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name()) + file.Close() + } + return err // error from recvPacket +} + +// Wrap underlying connection methods to use packetManager +func (svr *Server) sendPacket(m encoding.BinaryMarshaler) error { + if pkt, ok := m.(responsePacket); ok { + svr.pktMgr.readyPacket(pkt) + } else { + return errors.Errorf("unexpected packet type %T", m) + } + return nil +} + +func (svr *Server) sendError(p ider, err error) error { + return svr.sendPacket(statusFromError(p, err)) +} + +type ider interface { + id() uint32 +} + +// The init packet has no ID, so we just return a zero-value ID +func (p sshFxInitPacket) id() uint32 { return 0 } + +type sshFxpStatResponse struct { + ID uint32 + info os.FileInfo +} + +func (p sshFxpStatResponse) MarshalBinary() ([]byte, error) { + b := []byte{ssh_FXP_ATTRS} + b = marshalUint32(b, p.ID) + b = marshalFileInfo(b, p.info) + return b, nil +} + +var emptyFileStat = []interface{}{uint32(0)} + +func (p sshFxpOpenPacket) readonly() bool { + return !p.hasPflags(ssh_FXF_WRITE) +} + +func (p sshFxpOpenPacket) hasPflags(flags ...uint32) bool { + for _, f := range flags { + if p.Pflags&f == 0 { + return false + } + } + return true +} + +func (p sshFxpOpenPacket) respond(svr *Server) error { + var osFlags int + if p.hasPflags(ssh_FXF_READ, ssh_FXF_WRITE) { + osFlags |= os.O_RDWR + } else if p.hasPflags(ssh_FXF_WRITE) { + osFlags |= os.O_WRONLY + } else if p.hasPflags(ssh_FXF_READ) { + osFlags |= os.O_RDONLY + } else { + // how are they opening? 
+ return svr.sendError(p, syscall.EINVAL) + } + + if p.hasPflags(ssh_FXF_APPEND) { + osFlags |= os.O_APPEND + } + if p.hasPflags(ssh_FXF_CREAT) { + osFlags |= os.O_CREATE + } + if p.hasPflags(ssh_FXF_TRUNC) { + osFlags |= os.O_TRUNC + } + if p.hasPflags(ssh_FXF_EXCL) { + osFlags |= os.O_EXCL + } + + f, err := os.OpenFile(p.Path, osFlags, 0644) + if err != nil { + return svr.sendError(p, err) + } + + handle := svr.nextHandle(f) + return svr.sendPacket(sshFxpHandlePacket{p.ID, handle}) +} + +func (p sshFxpReaddirPacket) respond(svr *Server) error { + f, ok := svr.getHandle(p.Handle) + if !ok { + return svr.sendError(p, syscall.EBADF) + } + + dirname := f.Name() + dirents, err := f.Readdir(128) + if err != nil { + return svr.sendError(p, err) + } + + ret := sshFxpNamePacket{ID: p.ID} + for _, dirent := range dirents { + ret.NameAttrs = append(ret.NameAttrs, sshFxpNameAttr{ + Name: dirent.Name(), + LongName: runLs(dirname, dirent), + Attrs: []interface{}{dirent}, + }) + } + return svr.sendPacket(ret) +} + +func (p sshFxpSetstatPacket) respond(svr *Server) error { + // additional unmarshalling is required for each possibility here + b := p.Attrs.([]byte) + var err error + + debug("setstat name \"%s\"", p.Path) + if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { + var size uint64 + if size, b, err = unmarshalUint64Safe(b); err == nil { + err = os.Truncate(p.Path, int64(size)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { + var mode uint32 + if mode, b, err = unmarshalUint32Safe(b); err == nil { + err = os.Chmod(p.Path, os.FileMode(mode)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { + var atime uint32 + var mtime uint32 + if atime, b, err = unmarshalUint32Safe(b); err != nil { + } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { + } else { + atimeT := time.Unix(int64(atime), 0) + mtimeT := time.Unix(int64(mtime), 0) + err = os.Chtimes(p.Path, atimeT, mtimeT) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { + var uid uint32 + var gid uint32 + if uid, b, err = unmarshalUint32Safe(b); err != nil { + } else if gid, _, err = unmarshalUint32Safe(b); err != nil { + } else { + err = os.Chown(p.Path, int(uid), int(gid)) + } + } + + return svr.sendError(p, err) +} + +func (p sshFxpFsetstatPacket) respond(svr *Server) error { + f, ok := svr.getHandle(p.Handle) + if !ok { + return svr.sendError(p, syscall.EBADF) + } + + // additional unmarshalling is required for each possibility here + b := p.Attrs.([]byte) + var err error + + debug("fsetstat name \"%s\"", f.Name()) + if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { + var size uint64 + if size, b, err = unmarshalUint64Safe(b); err == nil { + err = f.Truncate(int64(size)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { + var mode uint32 + if mode, b, err = unmarshalUint32Safe(b); err == nil { + err = f.Chmod(os.FileMode(mode)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { + var atime uint32 + var mtime uint32 + if atime, b, err = unmarshalUint32Safe(b); err != nil { + } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { + } else { + atimeT := time.Unix(int64(atime), 0) + mtimeT := time.Unix(int64(mtime), 0) + err = os.Chtimes(f.Name(), atimeT, mtimeT) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { + var uid uint32 + var gid uint32 + if uid, b, err = unmarshalUint32Safe(b); err != nil { + } else if gid, _, err = unmarshalUint32Safe(b); err != nil { + } else { + err = f.Chown(int(uid), int(gid)) + } + } + + return svr.sendError(p, err) +} + +// translateErrno translates a 
syscall error number to a SFTP error code. +func translateErrno(errno syscall.Errno) uint32 { + switch errno { + case 0: + return ssh_FX_OK + case syscall.ENOENT: + return ssh_FX_NO_SUCH_FILE + case syscall.EPERM: + return ssh_FX_PERMISSION_DENIED + } + + return ssh_FX_FAILURE +} + +func statusFromError(p ider, err error) sshFxpStatusPacket { + ret := sshFxpStatusPacket{ + ID: p.id(), + StatusError: StatusError{ + // ssh_FX_OK = 0 + // ssh_FX_EOF = 1 + // ssh_FX_NO_SUCH_FILE = 2 ENOENT + // ssh_FX_PERMISSION_DENIED = 3 + // ssh_FX_FAILURE = 4 + // ssh_FX_BAD_MESSAGE = 5 + // ssh_FX_NO_CONNECTION = 6 + // ssh_FX_CONNECTION_LOST = 7 + // ssh_FX_OP_UNSUPPORTED = 8 + Code: ssh_FX_OK, + }, + } + if err == nil { + return ret + } + + debug("statusFromError: error is %T %#v", err, err) + ret.StatusError.Code = ssh_FX_FAILURE + ret.StatusError.msg = err.Error() + + switch e := err.(type) { + case syscall.Errno: + ret.StatusError.Code = translateErrno(e) + case *os.PathError: + debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err) + if errno, ok := e.Err.(syscall.Errno); ok { + ret.StatusError.Code = translateErrno(errno) + } + case fxerr: + ret.StatusError.Code = uint32(e) + default: + switch e { + case io.EOF: + ret.StatusError.Code = ssh_FX_EOF + case os.ErrNotExist: + ret.StatusError.Code = ssh_FX_NO_SUCH_FILE + } + } + + return ret +} + +func clamp(v, max uint32) uint32 { + if v > max { + return max + } + return v +} + +func runLsTypeWord(dirent os.FileInfo) string { + // find first character, the type char + // b Block special file. + // c Character special file. + // d Directory. + // l Symbolic link. + // s Socket link. + // p FIFO. + // - Regular file. + tc := '-' + mode := dirent.Mode() + if (mode & os.ModeDir) != 0 { + tc = 'd' + } else if (mode & os.ModeDevice) != 0 { + tc = 'b' + if (mode & os.ModeCharDevice) != 0 { + tc = 'c' + } + } else if (mode & os.ModeSymlink) != 0 { + tc = 'l' + } else if (mode & os.ModeSocket) != 0 { + tc = 's' + } else if (mode & os.ModeNamedPipe) != 0 { + tc = 'p' + } + + // owner + orc := '-' + if (mode & 0400) != 0 { + orc = 'r' + } + owc := '-' + if (mode & 0200) != 0 { + owc = 'w' + } + oxc := '-' + ox := (mode & 0100) != 0 + setuid := (mode & os.ModeSetuid) != 0 + if ox && setuid { + oxc = 's' + } else if setuid { + oxc = 'S' + } else if ox { + oxc = 'x' + } + + // group + grc := '-' + if (mode & 040) != 0 { + grc = 'r' + } + gwc := '-' + if (mode & 020) != 0 { + gwc = 'w' + } + gxc := '-' + gx := (mode & 010) != 0 + setgid := (mode & os.ModeSetgid) != 0 + if gx && setgid { + gxc = 's' + } else if setgid { + gxc = 'S' + } else if gx { + gxc = 'x' + } + + // all / others + arc := '-' + if (mode & 04) != 0 { + arc = 'r' + } + awc := '-' + if (mode & 02) != 0 { + awc = 'w' + } + axc := '-' + ax := (mode & 01) != 0 + sticky := (mode & os.ModeSticky) != 0 + if ax && sticky { + axc = 't' + } else if sticky { + axc = 'T' + } else if ax { + axc = 'x' + } + + return fmt.Sprintf("%c%c%c%c%c%c%c%c%c%c", tc, orc, owc, oxc, grc, gwc, gxc, arc, awc, axc) +} diff --git a/vendor/github.com/pkg/sftp/server_standalone/main.go b/vendor/github.com/pkg/sftp/server_standalone/main.go new file mode 100644 index 00000000..0b8e102a --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_standalone/main.go @@ -0,0 +1,52 @@ +package main + +// small wrapper around sftp server that allows it to be used as a separate process subsystem call by the ssh server. +// in practice this will statically link; however this allows unit testing from the sftp client. 
+ +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/pkg/sftp" +) + +func main() { + var ( + readOnly bool + debugStderr bool + debugLevel string + options []sftp.ServerOption + ) + + flag.BoolVar(&readOnly, "R", false, "read-only server") + flag.BoolVar(&debugStderr, "e", false, "debug to stderr") + flag.StringVar(&debugLevel, "l", "none", "debug level (ignored)") + flag.Parse() + + debugStream := ioutil.Discard + if debugStderr { + debugStream = os.Stderr + } + options = append(options, sftp.WithDebug(debugStream)) + + if readOnly { + options = append(options, sftp.ReadOnly()) + } + + svr, _ := sftp.NewServer( + struct { + io.Reader + io.WriteCloser + }{os.Stdin, + os.Stdout, + }, + options..., + ) + if err := svr.Serve(); err != nil { + fmt.Fprintf(debugStream, "sftp server completed with error: %v", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go new file mode 100644 index 00000000..8c01dac5 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go @@ -0,0 +1,21 @@ +package sftp + +import ( + "syscall" +) + +func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { + return &StatVFS{ + Bsize: uint64(stat.Bsize), + Frsize: uint64(stat.Bsize), // fragment size is a linux thing; use block size here + Blocks: stat.Blocks, + Bfree: stat.Bfree, + Bavail: stat.Bavail, + Files: stat.Files, + Ffree: stat.Ffree, + Favail: stat.Ffree, // not sure how to calculate Favail + Fsid: uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness? + Flag: uint64(stat.Flags), // assuming POSIX? + Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024 + }, nil +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go new file mode 100644 index 00000000..c37a34ac --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_impl.go @@ -0,0 +1,25 @@ +// +build darwin linux + +// fill in statvfs structure with OS specific values +// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance) + +package sftp + +import ( + "syscall" +) + +func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error { + stat := &syscall.Statfs_t{} + if err := syscall.Statfs(p.Path, stat); err != nil { + return svr.sendPacket(statusFromError(p, err)) + } + + retPkt, err := statvfsFromStatfst(stat) + if err != nil { + return svr.sendPacket(statusFromError(p, err)) + } + retPkt.ID = p.ID + + return svr.sendPacket(retPkt) +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go new file mode 100644 index 00000000..1d180d47 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_linux.go @@ -0,0 +1,22 @@ +// +build linux + +package sftp + +import ( + "syscall" +) + +func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { + return &StatVFS{ + Bsize: uint64(stat.Bsize), + Frsize: uint64(stat.Frsize), + Blocks: stat.Blocks, + Bfree: stat.Bfree, + Bavail: stat.Bavail, + Files: stat.Files, + Ffree: stat.Ffree, + Favail: stat.Ffree, // not sure how to calculate Favail + Flag: uint64(stat.Flags), // assuming POSIX? 
+ Namemax: uint64(stat.Namelen), + }, nil +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go new file mode 100644 index 00000000..3fe40788 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go @@ -0,0 +1,11 @@ +// +build !darwin,!linux + +package sftp + +import ( + "syscall" +) + +func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error { + return syscall.ENOTSUP +} diff --git a/vendor/github.com/pkg/sftp/server_stubs.go b/vendor/github.com/pkg/sftp/server_stubs.go new file mode 100644 index 00000000..a14c7348 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_stubs.go @@ -0,0 +1,32 @@ +// +build !cgo,!plan9 windows android + +package sftp + +import ( + "os" + "time" + "fmt" +) + +func runLs(dirname string, dirent os.FileInfo) string { + typeword := runLsTypeWord(dirent) + numLinks := 1 + if dirent.IsDir() { + numLinks = 0 + } + username := "root" + groupname := "root" + mtime := dirent.ModTime() + monthStr := mtime.Month().String()[0:3] + day := mtime.Day() + year := mtime.Year() + now := time.Now() + isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2)) + + yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute()) + if isOld { + yearOrTime = fmt.Sprintf("%d", year) + } + + return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name()) +} diff --git a/vendor/github.com/pkg/sftp/server_unix.go b/vendor/github.com/pkg/sftp/server_unix.go new file mode 100644 index 00000000..8d0c138c --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_unix.go @@ -0,0 +1,54 @@ +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris +// +build cgo + +package sftp + +import ( + "fmt" + "os" + "path" + "syscall" + "time" +) + +func runLsStatt(dirent os.FileInfo, statt *syscall.Stat_t) string { + // example from openssh sftp server: + // crw-rw-rw- 1 root wheel 0 Jul 31 20:52 ttyvd + // format: + // {directory / char device / etc}{rwxrwxrwx} {number of links} owner group size month day [time (this year) | year (otherwise)] name + + typeword := runLsTypeWord(dirent) + numLinks := statt.Nlink + uid := statt.Uid + gid := statt.Gid + username := fmt.Sprintf("%d", uid) + groupname := fmt.Sprintf("%d", gid) + // TODO FIXME: uid -> username, gid -> groupname lookup for ls -l format output + + mtime := dirent.ModTime() + monthStr := mtime.Month().String()[0:3] + day := mtime.Day() + year := mtime.Year() + now := time.Now() + isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2)) + + yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute()) + if isOld { + yearOrTime = fmt.Sprintf("%d", year) + } + + return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name()) +} + +// ls -l style output for a file, which is in the 'long output' section of a readdir response packet +// this is a very simple (lazy) implementation, just enough to look almost like openssh in a few basic cases +func runLs(dirname string, dirent os.FileInfo) string { + dsys := dirent.Sys() + if dsys == nil { + } else if statt, ok := dsys.(*syscall.Stat_t); !ok { + } else { + return runLsStatt(dirent, statt) + } + + return path.Join(dirname, dirent.Name()) +} diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go new file mode 100644 index 00000000..3cdb14df --- /dev/null +++ 
b/vendor/github.com/pkg/sftp/sftp.go @@ -0,0 +1,217 @@ +// Package sftp implements the SSH File Transfer Protocol as described in +// https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +package sftp + +import ( + "fmt" + + "github.com/pkg/errors" +) + +const ( + ssh_FXP_INIT = 1 + ssh_FXP_VERSION = 2 + ssh_FXP_OPEN = 3 + ssh_FXP_CLOSE = 4 + ssh_FXP_READ = 5 + ssh_FXP_WRITE = 6 + ssh_FXP_LSTAT = 7 + ssh_FXP_FSTAT = 8 + ssh_FXP_SETSTAT = 9 + ssh_FXP_FSETSTAT = 10 + ssh_FXP_OPENDIR = 11 + ssh_FXP_READDIR = 12 + ssh_FXP_REMOVE = 13 + ssh_FXP_MKDIR = 14 + ssh_FXP_RMDIR = 15 + ssh_FXP_REALPATH = 16 + ssh_FXP_STAT = 17 + ssh_FXP_RENAME = 18 + ssh_FXP_READLINK = 19 + ssh_FXP_SYMLINK = 20 + ssh_FXP_STATUS = 101 + ssh_FXP_HANDLE = 102 + ssh_FXP_DATA = 103 + ssh_FXP_NAME = 104 + ssh_FXP_ATTRS = 105 + ssh_FXP_EXTENDED = 200 + ssh_FXP_EXTENDED_REPLY = 201 +) + +const ( + ssh_FX_OK = 0 + ssh_FX_EOF = 1 + ssh_FX_NO_SUCH_FILE = 2 + ssh_FX_PERMISSION_DENIED = 3 + ssh_FX_FAILURE = 4 + ssh_FX_BAD_MESSAGE = 5 + ssh_FX_NO_CONNECTION = 6 + ssh_FX_CONNECTION_LOST = 7 + ssh_FX_OP_UNSUPPORTED = 8 + + // see draft-ietf-secsh-filexfer-13 + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 + ssh_FX_INVALID_HANDLE = 9 + ssh_FX_NO_SUCH_PATH = 10 + ssh_FX_FILE_ALREADY_EXISTS = 11 + ssh_FX_WRITE_PROTECT = 12 + ssh_FX_NO_MEDIA = 13 + ssh_FX_NO_SPACE_ON_FILESYSTEM = 14 + ssh_FX_QUOTA_EXCEEDED = 15 + ssh_FX_UNKNOWN_PRINCIPAL = 16 + ssh_FX_LOCK_CONFLICT = 17 + ssh_FX_DIR_NOT_EMPTY = 18 + ssh_FX_NOT_A_DIRECTORY = 19 + ssh_FX_INVALID_FILENAME = 20 + ssh_FX_LINK_LOOP = 21 + ssh_FX_CANNOT_DELETE = 22 + ssh_FX_INVALID_PARAMETER = 23 + ssh_FX_FILE_IS_A_DIRECTORY = 24 + ssh_FX_BYTE_RANGE_LOCK_CONFLICT = 25 + ssh_FX_BYTE_RANGE_LOCK_REFUSED = 26 + ssh_FX_DELETE_PENDING = 27 + ssh_FX_FILE_CORRUPT = 28 + ssh_FX_OWNER_INVALID = 29 + ssh_FX_GROUP_INVALID = 30 + ssh_FX_NO_MATCHING_BYTE_RANGE_LOCK = 31 +) + +const ( + ssh_FXF_READ = 0x00000001 + ssh_FXF_WRITE = 0x00000002 + ssh_FXF_APPEND = 0x00000004 + ssh_FXF_CREAT = 0x00000008 + ssh_FXF_TRUNC = 0x00000010 + ssh_FXF_EXCL = 0x00000020 +) + +type fxp uint8 + +func (f fxp) String() string { + switch f { + case ssh_FXP_INIT: + return "SSH_FXP_INIT" + case ssh_FXP_VERSION: + return "SSH_FXP_VERSION" + case ssh_FXP_OPEN: + return "SSH_FXP_OPEN" + case ssh_FXP_CLOSE: + return "SSH_FXP_CLOSE" + case ssh_FXP_READ: + return "SSH_FXP_READ" + case ssh_FXP_WRITE: + return "SSH_FXP_WRITE" + case ssh_FXP_LSTAT: + return "SSH_FXP_LSTAT" + case ssh_FXP_FSTAT: + return "SSH_FXP_FSTAT" + case ssh_FXP_SETSTAT: + return "SSH_FXP_SETSTAT" + case ssh_FXP_FSETSTAT: + return "SSH_FXP_FSETSTAT" + case ssh_FXP_OPENDIR: + return "SSH_FXP_OPENDIR" + case ssh_FXP_READDIR: + return "SSH_FXP_READDIR" + case ssh_FXP_REMOVE: + return "SSH_FXP_REMOVE" + case ssh_FXP_MKDIR: + return "SSH_FXP_MKDIR" + case ssh_FXP_RMDIR: + return "SSH_FXP_RMDIR" + case ssh_FXP_REALPATH: + return "SSH_FXP_REALPATH" + case ssh_FXP_STAT: + return "SSH_FXP_STAT" + case ssh_FXP_RENAME: + return "SSH_FXP_RENAME" + case ssh_FXP_READLINK: + return "SSH_FXP_READLINK" + case ssh_FXP_SYMLINK: + return "SSH_FXP_SYMLINK" + case ssh_FXP_STATUS: + return "SSH_FXP_STATUS" + case ssh_FXP_HANDLE: + return "SSH_FXP_HANDLE" + case ssh_FXP_DATA: + return "SSH_FXP_DATA" + case ssh_FXP_NAME: + return "SSH_FXP_NAME" + case ssh_FXP_ATTRS: + return "SSH_FXP_ATTRS" + case ssh_FXP_EXTENDED: + return "SSH_FXP_EXTENDED" + case ssh_FXP_EXTENDED_REPLY: + return "SSH_FXP_EXTENDED_REPLY" + default: + return "unknown" + } +} + +type fx uint8 + 
+func (f fx) String() string { + switch f { + case ssh_FX_OK: + return "SSH_FX_OK" + case ssh_FX_EOF: + return "SSH_FX_EOF" + case ssh_FX_NO_SUCH_FILE: + return "SSH_FX_NO_SUCH_FILE" + case ssh_FX_PERMISSION_DENIED: + return "SSH_FX_PERMISSION_DENIED" + case ssh_FX_FAILURE: + return "SSH_FX_FAILURE" + case ssh_FX_BAD_MESSAGE: + return "SSH_FX_BAD_MESSAGE" + case ssh_FX_NO_CONNECTION: + return "SSH_FX_NO_CONNECTION" + case ssh_FX_CONNECTION_LOST: + return "SSH_FX_CONNECTION_LOST" + case ssh_FX_OP_UNSUPPORTED: + return "SSH_FX_OP_UNSUPPORTED" + default: + return "unknown" + } +} + +type unexpectedPacketErr struct { + want, got uint8 +} + +func (u *unexpectedPacketErr) Error() string { + return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got)) +} + +func unimplementedPacketErr(u uint8) error { + return errors.Errorf("sftp: unimplemented packet type: got %v", fxp(u)) +} + +type unexpectedIDErr struct{ want, got uint32 } + +func (u *unexpectedIDErr) Error() string { + return fmt.Sprintf("sftp: unexpected id: want %v, got %v", u.want, u.got) +} + +func unimplementedSeekWhence(whence int) error { + return errors.Errorf("sftp: unimplemented seek whence %v", whence) +} + +func unexpectedCount(want, got uint32) error { + return errors.Errorf("sftp: unexpected count: want %v, got %v", want, got) +} + +type unexpectedVersionErr struct{ want, got uint32 } + +func (u *unexpectedVersionErr) Error() string { + return fmt.Sprintf("sftp: unexpected server version: want %v, got %v", u.want, u.got) +} + +// A StatusError is returned when an SFTP operation fails, and provides +// additional information about the failure. +type StatusError struct { + Code uint32 + msg, lang string +} + +func (s *StatusError) Error() string { return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code)) } diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 00000000..298f0e26 --- /dev/null +++ b/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go new file mode 100644 index 00000000..f5b5e127 --- /dev/null +++ b/vendor/github.com/spf13/afero/afero.go @@ -0,0 +1,108 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package afero provides types and methods for interacting with the filesystem, +// as an abstraction layer. + +// Afero also provides a few implementations that are mostly interoperable. One that +// uses the operating system filesystem, one that uses memory to store files +// (cross platform) and an interface that should be implemented if you want to +// provide your own filesystem. + +package afero + +import ( + "errors" + "io" + "os" + "time" +) + +type Afero struct { + Fs +} + +// File represents a file in the filesystem. +type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +// Fs is the filesystem interface. +// +// Any simulated or real filesystem should implement this interface. +type Fs interface { + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. + Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. 
+ Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flag int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // The name of this FileSystem + Name() string + + //Chmod changes the mode of the named file to mode. + Chmod(name string, mode os.FileMode) error + + //Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go new file mode 100644 index 00000000..5e4fc2ec --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath.go @@ -0,0 +1,145 @@ +package afero + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +// The BasePathFs restricts all operations to a given path within an Fs. +// The given file name to the operations on this Fs will be prepended with +// the base path before calling the base Fs. +// Any file name (after filepath.Clean()) outside this base path will be +// treated as non existing file. +// +// Note that it does not clean the error messages on return, so you may +// reveal the real path on errors. +type BasePathFs struct { + source Fs + path string +} + +func NewBasePathFs(source Fs, path string) Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (path string, err error) { + if err := validateBasePathName(name); err != nil { + return "", err + } + + bpath := filepath.Clean(b.path) + path = filepath.Clean(filepath.Join(bpath, name)) + if !strings.HasPrefix(path, bpath) { + return name, os.ErrNotExist + } + + return path, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. 
+ if filepath.IsAbs(name) { + return &os.PathError{Op: "realPath", Path: name, Err: errors.New("got a real OS path instead of a virtual")} + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + return b.source.OpenFile(name, flag, mode) +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + return b.source.Open(name) +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + return b.source.Create(name) +} + +// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go new file mode 100644 index 00000000..b026e0de --- /dev/null +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -0,0 +1,290 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +// If the cache duration is 0, cache time will be unlimited, i.e. once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... 
or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. +type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + // not present in the overlay, unknown if it exists in the base: + cacheMiss cacheState = iota + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT || os.IsNotExist(err) { + return cacheMiss, nil, nil + } + + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Rename(oldname, newname) + case cacheStale, cacheMiss: + if err := u.copyToLayer(oldname); err != nil { + return err + } + err = u.base.Rename(oldname, newname) + } + if err != nil { + return err + } + return u.layer.Rename(oldname, newname) +} + +func (u 
*CacheOnReadFs) Remove(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.Remove(name) + } + if err != nil { + return err + } + return u.layer.Remove(name) +} + +func (u *CacheOnReadFs) RemoveAll(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.RemoveAll(name) + } + if err != nil { + return err + } + return u.layer.RemoveAll(name) +} + +func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + st, _, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheLocal, cacheHit: + default: + if err := u.copyToLayer(name); err != nil { + return nil, err + } + } + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + bfi, err := u.base.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + lfi, err := u.layer.OpenFile(name, flag, perm) + if err != nil { + bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? + return nil, err + } + return &UnionFile{base: bfi, layer: lfi}, nil + } + return u.layer.OpenFile(name, flag, perm) +} + +func (u *CacheOnReadFs) Open(name string) (File, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + + switch st { + case cacheLocal: + return u.layer.Open(name) + + case cacheMiss: + bfi, err := u.base.Stat(name) + if err != nil { + return nil, err + } + if bfi.IsDir() { + return u.base.Open(name) + } + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + + case cacheStale: + if !fi.IsDir() { + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + } + case cacheHit: + if !fi.IsDir() { + return u.layer.Open(name) + } + } + // the dirs from cacheHit, cacheStale fall down here: + bfile, _ := u.base.Open(name) + lfile, err := u.layer.Open(name) + if err != nil && bfile == nil { + return nil, err + } + return &UnionFile{base: bfile, layer: lfile}, nil +} + +func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { + err := u.base.Mkdir(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache +} + +func (u *CacheOnReadFs) Name() string { + return "CacheOnReadFs" +} + +func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { + err := u.base.MkdirAll(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CacheOnReadFs) Create(name string) (File, error) { + bfh, err := u.base.Create(name) + if err != nil { + return nil, err + } + lfh, err := u.layer.Create(name) + if err != nil { + // oops, see comment about OS_TRUNC above, should we remove? then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{base: bfh, layer: lfh}, nil +} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 00000000..5728243d --- /dev/null +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 00000000..968fc278 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,25 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !darwin +// +build !openbsd +// +build !freebsd +// +build !dragonfly +// +build !netbsd + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 00000000..f2ebcd22 --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,253 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes() and Chmod()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). 
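As an illustrative aside (not part of the vendored source), the two union filesystems introduced above are typically composed by stacking a base Fs under an overlay; the one-minute cache window, the in-memory overlays, and the read-only wrapper around the base are arbitrary choices for this sketch, using only the constructors defined in these files:

package main

import (
	"fmt"
	"time"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewOsFs()      // authoritative but slower layer
	cache := afero.NewMemMapFs() // fast in-memory overlay

	// Serve reads from the overlay, re-checking the base after one minute.
	cached := afero.NewCacheOnReadFs(base, cache, 1*time.Minute)

	// Keep the base strictly read-only; all changes land in the overlay.
	cow := afero.NewCopyOnWriteFs(afero.NewReadOnlyFs(base), afero.NewMemMapFs())

	fmt.Println(cached.Name(), cow.Name())
}
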
+type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + origErr := err + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { + return u.base.Stat(name) + } + return nil, origErr + } + return fi, nil +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. +func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? 
+ } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{base: bfile, layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return syscall.EEXIST + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return syscall.EEXIST + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) +} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 00000000..c4219368 --- /dev/null +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." + } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 00000000..5c3a3d8f --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,230 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. 
+func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. 
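For reference, a small usage sketch (not part of the vendored file) of the ReadDir, ReadFile and WriteFile helpers above, run against an in-memory filesystem; the path and contents are invented for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// WriteFile creates (or truncates) the file with the given permissions.
	if err := afero.WriteFile(fs, "/notes/todo.txt", []byte("hello"), 0644); err != nil {
		log.Fatal(err)
	}

	// ReadFile returns the whole contents; EOF is not reported as an error.
	data, err := afero.ReadFile(fs, "/notes/todo.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", data)

	// ReadDir lists /notes sorted by name.
	infos, err := afero.ReadDir(fs, "/notes")
	if err != nil {
		log.Fatal(err)
	}
	for _, fi := range infos {
		fmt.Println(fi.Name(), fi.Size())
	}
}
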
+var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func (a Afero) TempFile(dir, prefix string) (f File, err error) { + return TempFile(a.Fs, dir, prefix) +} + +func TempFile(fs Fs, dir, prefix string) (f File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func (a Afero) TempDir(dir, prefix string) (name string, err error) { + return TempDir(a.Fs, dir, prefix) +} +func TempDir(fs Fs, dir, prefix string) (name string, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := filepath.Join(dir, prefix+nextSuffix()) + err = fs.Mkdir(try, 0700) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if err == nil { + name = try + } + break + } + return +} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go new file mode 100644 index 00000000..08b3b7e0 --- /dev/null +++ b/vendor/github.com/spf13/afero/match.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2009 The Go Authors. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "path/filepath" + "sort" + "strings" +) + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. 
The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// This was adapted from (http://golang.org/pkg/path/filepath) and uses several +// built-ins from that package. +func Glob(fs Fs, pattern string) (matches []string, err error) { + if !hasMeta(pattern) { + // afero does not support Lstat directly. + if _, err = lstatIfOs(fs, pattern); err != nil { + return nil, nil + } + return []string{pattern}, nil + } + + dir, file := filepath.Split(pattern) + switch dir { + case "": + dir = "." + case string(filepath.Separator): + // nothing + default: + dir = dir[0 : len(dir)-1] // chop off trailing separator + } + + if !hasMeta(dir) { + return glob(fs, dir, file, nil) + } + + var m []string + m, err = Glob(fs, dir) + if err != nil { + return + } + for _, d := range m { + matches, err = glob(fs, d, file, matches) + if err != nil { + return + } + } + return +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := fs.Stat(dir) + if err != nil { + return + } + if !fi.IsDir() { + return + } + d, err := fs.Open(dir) + if err != nil { + return + } + defer d.Close() + + names, _ := d.Readdirnames(-1) + sort.Strings(names) + + for _, n := range names { + matched, err := filepath.Match(pattern, n) + if err != nil { + return m, err + } + if matched { + m = append(m, filepath.Join(dir, n)) + } + } + return +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by Match. +func hasMeta(path string) bool { + // TODO(niemeyer): Should other magic characters be added here? + return strings.IndexAny(path, "*?[") >= 0 +} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go new file mode 100644 index 00000000..e104013f --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dir.go @@ -0,0 +1,37 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +type Dir interface { + Len() int + Names() []string + Files() []*FileData + Add(*FileData) + Remove(*FileData) +} + +func RemoveFromMemDir(dir *FileData, f *FileData) { + dir.memDir.Remove(f) +} + +func AddToMemDir(dir *FileData, f *FileData) { + dir.memDir.Add(f) +} + +func InitializeDir(d *FileData) { + if d.memDir == nil { + d.dir = true + d.memDir = &DirMap{} + } +} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go new file mode 100644 index 00000000..03a57ee5 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dirmap.go @@ -0,0 +1,43 @@ +// Copyright © 2015 Steve Francia . 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 00000000..885e5542 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,314 @@ +// Copyright © 2015 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mem + +import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" +) + +import "time" + +const FilePathSeparator = string(filepath.Separator) + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time +} + +func (d *FileData) Name() string { + d.Lock() + defer d.Unlock() + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true} +} + +func ChangeFileName(f *FileData, newname string) { + f.Lock() + f.name = newname + f.Unlock() +} + +func SetMode(f *FileData, mode os.FileMode) { + f.Lock() + f.mode = mode + f.Unlock() +} + +func SetModTime(f *FileData, mtime time.Time) { + f.Lock() + setModTime(f, mtime) + f.Unlock() +} + +func setModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + setModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.fileData.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +func (f *File) Read(b []byte) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed == true { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if int(f.at) > len(f.fileData.data) { + return 0, io.ErrUnexpectedEOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Read(b) +} + +func (f *File) Truncate(size int64) error { + if f.closed == true { + return ErrFileClosed + } + if f.readOnly 
{ + return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + if size > int64(len(f.fileData.data)) { + diff := size - int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + setModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + switch whence { + case 0: + atomic.StoreInt64(&f.at, offset) + case 1: + atomic.AddInt64(&f.at, int64(offset)) + case 2: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.readOnly { + return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) + } + setModTime(f.fileData, time.Now()) + + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + s.Lock() + _, name := filepath.Split(s.name) + s.Unlock() + return name +} +func (s *FileInfo) Mode() os.FileMode { + s.Lock() + defer s.Unlock() + return s.mode +} +func (s *FileInfo) ModTime() time.Time { + s.Lock() + defer s.Unlock() + return s.modtime +} +func (s *FileInfo) IsDir() bool { + s.Lock() + defer s.Unlock() + return s.dir +} +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + s.Lock() + defer s.Unlock() + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 00000000..09498e70 --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,365 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
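As a brief aside, the in-memory File implemented above can be exercised directly through the mem package's exported helpers; a minimal sketch, assuming use from an external package, with the file name invented:

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/spf13/afero/mem"
)

func main() {
	data := mem.CreateFile("/scratch.txt") // backing FileData
	f := mem.NewFileHandle(data)           // writable handle over it

	if _, err := f.Write([]byte("in-memory bytes")); err != nil {
		log.Fatal(err)
	}

	// Rewind and read everything back through the same handle.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 64)
	n, err := f.Read(buf)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Println(string(buf[:n]))
}
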
+ +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? + m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + }) + return m.data +} + +func (*MemMapFs) Name() string { return "MemMapFS" } + +func (m *MemMapFs) Create(name string) (File, error) { + name = normalizePath(name) + m.mu.Lock() + file := mem.CreateFile(name) + m.getData()[name] = file + m.registerWithParent(file) + m.mu.Unlock() + return mem.NewFileHandle(file), nil +} + +func (m *MemMapFs) unRegisterWithParent(fileName string) error { + f, err := m.lockfreeOpen(fileName) + if err != nil { + return err + } + parent := m.findParent(f) + if parent == nil { + log.Panic("parent of ", f.Name(), " is nil") + } + + parent.Lock() + mem.RemoveFromMemDir(parent, f) + parent.Unlock() + return nil +} + +func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { + pdir, _ := filepath.Split(f.Name()) + pdir = filepath.Clean(pdir) + pfile, err := m.lockfreeOpen(pdir) + if err != nil { + return nil + } + return pfile +} + +func (m *MemMapFs) registerWithParent(f *mem.FileData) { + if f == nil { + return + } + parent := m.findParent(f) + if parent == nil { + pdir := filepath.Dir(filepath.Clean(f.Name())) + err := m.lockfreeMkdir(pdir, 0777) + if err != nil { + //log.Println("Mkdir error:", err) + return + } + parent, err = m.lockfreeOpen(pdir) + if err != nil { + //log.Println("Open after Mkdir error:", err) + return + } + } + + parent.Lock() + mem.InitializeDir(parent) + mem.AddToMemDir(parent, f) + parent.Unlock() +} + +func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + x, ok := m.getData()[name] + if ok { + // Only return ErrFileExists if it's a file, not a directory. 
+ i := mem.FileInfo{FileData: x} + if !i.IsDir() { + return ErrFileExists + } + } else { + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + } + return nil +} + +func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + _, ok := m.getData()[name] + m.mu.RUnlock() + if ok { + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } + + m.mu.Lock() + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + m.mu.Unlock() + + m.Chmod(name, perm|os.ModeDir) + + return nil +} + +func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { + err := m.Mkdir(path, perm) + if err != nil { + if err.(*os.PathError).Err == ErrFileExists { + return nil + } + return err + } + return nil +} + +// Handle some relative paths +func normalizePath(path string) string { + path = filepath.Clean(path) + + switch path { + case ".": + return FilePathSeparator + case "..": + return FilePathSeparator + default: + return path + } +} + +func (m *MemMapFs) Open(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewReadOnlyFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) openWrite(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) open(name string) (*mem.FileData, error) { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} + } + return f, nil +} + +func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { + name = normalizePath(name) + f, ok := m.getData()[name] + if ok { + return f, nil + } else { + return nil, ErrFileNotFound + } +} + +func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + chmod := false + file, err := m.openWrite(name) + if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { + file, err = m.Create(name) + chmod = true + } + if err != nil { + return nil, err + } + if flag == os.O_RDONLY { + file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) + } + if flag&os.O_APPEND > 0 { + _, err = file.Seek(0, os.SEEK_END) + if err != nil { + file.Close() + return nil, err + } + } + if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { + err = file.Truncate(0) + if err != nil { + file.Close() + return nil, err + } + } + if chmod { + m.Chmod(name, perm) + } + return file, nil +} + +func (m *MemMapFs) Remove(name string) error { + name = normalizePath(name) + + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.getData()[name]; ok { + err := m.unRegisterWithParent(name) + if err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + delete(m.getData(), name) + } else { + return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} + } + return nil +} + +func (m *MemMapFs) RemoveAll(path string) error { + path = normalizePath(path) + m.mu.Lock() + m.unRegisterWithParent(path) + m.mu.Unlock() + + m.mu.RLock() + defer m.mu.RUnlock() + + for p, _ := range m.getData() { + if strings.HasPrefix(p, path) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + m.mu.Unlock() + m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) Rename(oldname, newname string) error { + oldname = normalizePath(oldname) + newname = normalizePath(newname) + + if oldname == newname { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := 
m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + fileData := m.getData()[oldname] + delete(m.getData(), oldname) + mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} + } + return nil +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{FileData: x} + fmt.Println(x.Name(), y.Size()) + } +} + +// func debugMemMapList(fs Fs) { +// if x, ok := fs.(*MemMapFs); ok { +// x.List() +// } +// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go new file mode 100644 index 00000000..6b8bce1c --- /dev/null +++ b/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,94 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +// OsFs is a Fs implementation that uses functions provided by the os package. +// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). 
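Since MemMapFs above and OsFs here expose the same Fs interface, code written against the interface can swap backends freely (in-memory for tests, the real OS for production). An illustrative sketch, not part of the vendored file, with the path and payload invented:

package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

// save writes data through whatever backend it is handed.
func save(fs afero.Fs, path string, data []byte) error {
	return afero.WriteFile(fs, path, data, 0644)
}

func main() {
	// In tests: an in-memory backend, nothing touches disk.
	if err := save(afero.NewMemMapFs(), "/tmp/example.cfg", []byte("x=1")); err != nil {
		log.Fatal(err)
	}
	// In production the same call would take afero.NewOsFs() instead.
	fmt.Println("written to the in-memory backend")
}
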
+type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go new file mode 100644 index 00000000..1d90e46d --- /dev/null +++ b/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,108 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfOs(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem is OsFs use Lstat, else use fs.Stat +func lstatIfOs(fs Fs, path string) (info os.FileInfo, err error) { + _, ok := fs.(*OsFs) + if ok { + info, err = os.Lstat(path) + } else { + info, err = fs.Stat(path) + } + return +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
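An illustrative driver for Walk (not part of the vendored file): the tree is built on an in-memory filesystem and the directory layout is invented for the example:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	if err := afero.WriteFile(fs, "/srv/a/one.txt", []byte("1"), 0644); err != nil {
		log.Fatal(err)
	}
	if err := afero.WriteFile(fs, "/srv/b/two.txt", []byte("2"), 0644); err != nil {
		log.Fatal(err)
	}

	// Print every file and directory under /srv in lexical order.
	err := afero.Walk(fs, "/srv", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
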
+ +func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { + return Walk(a.Fs, root, walkFn) +} + +func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { + info, err := lstatIfOs(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go new file mode 100644 index 00000000..f1fa55bc --- /dev/null +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -0,0 +1,70 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +type ReadOnlyFs struct { + source Fs +} + +func NewReadOnlyFs(source Fs) Fs { + return &ReadOnlyFs{source: source} +} + +func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { + return ReadDir(r.source, name) +} + +func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Name() string { + return "ReadOnlyFilter" +} + +func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { + return r.source.Stat(name) +} + +func (r *ReadOnlyFs) Rename(o, n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) RemoveAll(p string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Remove(n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + return nil, syscall.EPERM + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *ReadOnlyFs) Open(n string) (File, error) { + return r.source.Open(n) +} + +func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Create(n string) (File, error) { + return nil, syscall.EPERM +} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go new file mode 100644 index 00000000..9d92dbc0 --- /dev/null +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -0,0 +1,214 @@ +package afero + +import ( + "os" + "regexp" + "syscall" + "time" +) + +// The RegexpFs filters files (not directories) by regular expression. Only +// files matching the given regexp will be allowed, all others get a ENOENT error ( +// "No such file or directory"). 
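A short sketch (outside the vendored source) combining the ReadOnlyFs defined above with the RegexpFs described here: the regexp filter hides everything but *.txt, and the read-only layer underneath rejects mutations. The pattern and paths are arbitrary examples:

package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	backing := afero.NewMemMapFs()
	_ = afero.WriteFile(backing, "/data/keep.txt", []byte("kept"), 0644)
	_ = afero.WriteFile(backing, "/data/skip.bin", []byte{0x00}, 0644)

	// Only files matching \.txt$ are visible; others report ENOENT.
	filtered := afero.NewRegexpFs(afero.NewReadOnlyFs(backing), regexp.MustCompile(`\.txt$`))

	if _, err := filtered.Stat("/data/keep.txt"); err == nil {
		fmt.Println("keep.txt is visible")
	}
	if _, err := filtered.Stat("/data/skip.bin"); err != nil {
		fmt.Println("skip.bin is hidden:", err)
	}
	// Mutating calls are refused by the read-only layer underneath.
	if err := filtered.Chmod("/data/keep.txt", 0600); err != nil {
		fmt.Println("write-type call rejected:", err)
	}
}
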
+// +type RegexpFs struct { + re *regexp.Regexp + source Fs +} + +func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { + return &RegexpFs{source: source, re: re} +} + +type RegexpFile struct { + f File + re *regexp.Regexp +} + +func (r *RegexpFs) matchesName(name string) error { + if r.re == nil { + return nil + } + if r.re.MatchString(name) { + return nil + } + return syscall.ENOENT +} + +func (r *RegexpFs) dirOrMatches(name string) error { + dir, err := IsDir(r.source, name) + if err != nil { + return err + } + if dir { + return nil + } + return r.matchesName(name) +} + +func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chtimes(name, a, m) +} + +func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chmod(name, mode) +} + +func (r *RegexpFs) Name() string { + return "RegexpFs" +} + +func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.Stat(name) +} + +func (r *RegexpFs) Rename(oldname, newname string) error { + dir, err := IsDir(r.source, oldname) + if err != nil { + return err + } + if dir { + return nil + } + if err := r.matchesName(oldname); err != nil { + return err + } + if err := r.matchesName(newname); err != nil { + return err + } + return r.source.Rename(oldname, newname) +} + +func (r *RegexpFs) RemoveAll(p string) error { + dir, err := IsDir(r.source, p) + if err != nil { + return err + } + if !dir { + if err := r.matchesName(p); err != nil { + return err + } + } + return r.source.RemoveAll(p) +} + +func (r *RegexpFs) Remove(name string) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Remove(name) +} + +func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *RegexpFs) Open(name string) (File, error) { + dir, err := IsDir(r.source, name) + if err != nil { + return nil, err + } + if !dir { + if err := r.matchesName(name); err != nil { + return nil, err + } + } + f, err := r.source.Open(name) + return &RegexpFile{f: f, re: r.re}, nil +} + +func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { + return r.source.Mkdir(n, p) +} + +func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { + return r.source.MkdirAll(n, p) +} + +func (r *RegexpFs) Create(name string) (File, error) { + if err := r.matchesName(name); err != nil { + return nil, err + } + return r.source.Create(name) +} + +func (f *RegexpFile) Close() error { + return f.f.Close() +} + +func (f *RegexpFile) Read(s []byte) (int, error) { + return f.f.Read(s) +} + +func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { + return f.f.ReadAt(s, o) +} + +func (f *RegexpFile) Seek(o int64, w int) (int64, error) { + return f.f.Seek(o, w) +} + +func (f *RegexpFile) Write(s []byte) (int, error) { + return f.f.Write(s) +} + +func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { + return f.f.WriteAt(s, o) +} + +func (f *RegexpFile) Name() string { + return f.f.Name() +} + +func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { + var rfi []os.FileInfo + rfi, err = f.f.Readdir(c) + if err != nil { + return nil, err + } + for _, i := range rfi { + if i.IsDir() || f.re.MatchString(i.Name()) { + fi = append(fi, i) + } + } + return fi, 
nil +} + +func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { + fi, err := f.Readdir(c) + if err != nil { + return nil, err + } + for _, s := range fi { + n = append(n, s.Name()) + } + return n, nil +} + +func (f *RegexpFile) Stat() (os.FileInfo, error) { + return f.f.Stat() +} + +func (f *RegexpFile) Sync() error { + return f.f.Sync() +} + +func (f *RegexpFile) Truncate(s int64) error { + return f.f.Truncate(s) +} + +func (f *RegexpFile) WriteString(s string) (int, error) { + return f.f.WriteString(s) +} diff --git a/vendor/github.com/spf13/afero/sftpfs/file.go b/vendor/github.com/spf13/afero/sftpfs/file.go new file mode 100644 index 00000000..e4ccb55c --- /dev/null +++ b/vendor/github.com/spf13/afero/sftpfs/file.go @@ -0,0 +1,95 @@ +// Copyright © 2015 Jerry Jacobs . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sftpfs + +import ( + "github.com/pkg/sftp" + "os" +) + +type File struct { + fd *sftp.File +} + +func FileOpen(s *sftp.Client, name string) (*File, error) { + fd, err := s.Open(name) + if err != nil { + return &File{}, err + } + return &File{fd: fd}, nil +} + +func FileCreate(s *sftp.Client, name string) (*File, error) { + fd, err := s.Create(name) + if err != nil { + return &File{}, err + } + return &File{fd: fd}, nil +} + +func (f *File) Close() error { + return f.fd.Close() +} + +func (f *File) Name() string { + return f.fd.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return f.fd.Stat() +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Truncate(size int64) error { + return f.fd.Truncate(size) +} + +func (f *File) Read(b []byte) (n int, err error) { + return f.fd.Read(b) +} + +// TODO +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + return 0, nil +} + +// TODO +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + return nil, nil +} + +// TODO +func (f *File) Readdirnames(n int) (names []string, err error) { + return nil, nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + return f.fd.Seek(offset, whence) +} + +func (f *File) Write(b []byte) (n int, err error) { + return f.fd.Write(b) +} + +// TODO +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + return 0, nil +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.fd.Write([]byte(s)) +} diff --git a/vendor/github.com/spf13/afero/sftpfs/sftp.go b/vendor/github.com/spf13/afero/sftpfs/sftp.go new file mode 100644 index 00000000..28721da7 --- /dev/null +++ b/vendor/github.com/spf13/afero/sftpfs/sftp.go @@ -0,0 +1,129 @@ +// Copyright © 2015 Jerry Jacobs . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sftpfs + +import ( + "os" + "time" + + "github.com/pkg/sftp" + "github.com/spf13/afero" +) + +// Fs is a afero.Fs implementation that uses functions provided by the sftp package. +// +// For details in any method, check the documentation of the sftp package +// (github.com/pkg/sftp). +type Fs struct { + client *sftp.Client +} + +func New(client *sftp.Client) afero.Fs { + return &Fs{client: client} +} + +func (s Fs) Name() string { return "sftpfs" } + +func (s Fs) Create(name string) (afero.File, error) { + return FileCreate(s.client, name) +} + +func (s Fs) Mkdir(name string, perm os.FileMode) error { + err := s.client.Mkdir(name) + if err != nil { + return err + } + return s.client.Chmod(name, perm) +} + +func (s Fs) MkdirAll(path string, perm os.FileMode) error { + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := s.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return err + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = s.MkdirAll(path[0:j-1], perm) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = s.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := s.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +func (s Fs) Open(name string) (afero.File, error) { + return FileOpen(s.client, name) +} + +func (s Fs) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { + return nil, nil +} + +func (s Fs) Remove(name string) error { + return s.client.Remove(name) +} + +func (s Fs) RemoveAll(path string) error { + // TODO have a look at os.RemoveAll + // https://github.com/golang/go/blob/master/src/os/path.go#L66 + return nil +} + +func (s Fs) Rename(oldname, newname string) error { + return s.client.Rename(oldname, newname) +} + +func (s Fs) Stat(name string) (os.FileInfo, error) { + return s.client.Stat(name) +} + +func (s Fs) Lstat(p string) (os.FileInfo, error) { + return s.client.Lstat(p) +} + +func (s Fs) Chmod(name string, mode os.FileMode) error { + return s.client.Chmod(name, mode) +} + +func (s Fs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return s.client.Chtimes(name, atime, mtime) +} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go new file mode 100644 index 00000000..99f9e5db --- /dev/null +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -0,0 +1,274 @@ +package afero + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +// The UnionFile implements the afero.File interface and will be returned +// when reading a directory present at least in the overlay or opening a file +// for writing. 
+// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + base File + layer File + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.base != nil { + f.base.Close() + } + if f.layer != nil { + return f.layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.layer != nil { + n, err := f.layer.Read(s) + if (err == nil || err == io.EOF) && f.base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.base != nil { + return f.base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.layer != nil { + n, err := f.layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.base != nil { + _, err = f.base.Seek(o+int64(n), os.SEEK_SET) + } + return n, err + } + if f.base != nil { + return f.base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.layer != nil { + pos, err = f.layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.base != nil { + _, err = f.base.Seek(o, w) + } + return pos, err + } + if f.base != nil { + return f.base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.layer != nil { + n, err = f.layer.Write(s) + if err == nil && f.base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
+ _, err = f.base.Write(s) + } + return n, err + } + if f.base != nil { + return f.base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.layer != nil { + n, err = f.layer.WriteAt(s, o) + if err == nil && f.base != nil { + _, err = f.base.WriteAt(s, o) + } + return n, err + } + if f.base != nil { + return f.base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.layer != nil { + return f.layer.Name() + } + return f.base.Name() +} + +// Readdir will weave the two directories together and +// return a single view of the overlayed directories +func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { + if f.off == 0 { + var files = make(map[string]os.FileInfo) + var rfi []os.FileInfo + if f.layer != nil { + rfi, err = f.layer.Readdir(-1) + if err != nil { + return nil, err + } + for _, fi := range rfi { + files[fi.Name()] = fi + } + } + + if f.base != nil { + rfi, err = f.base.Readdir(-1) + if err != nil { + return nil, err + } + for _, fi := range rfi { + if _, exists := files[fi.Name()]; !exists { + files[fi.Name()] = fi + } + } + } + for _, fi := range files { + f.files = append(f.files, fi) + } + } + if c == -1 { + return f.files[f.off:], nil + } + defer func() { f.off += c }() + return f.files[f.off:c], nil +} + +func (f *UnionFile) Readdirnames(c int) ([]string, error) { + rfi, err := f.Readdir(c) + if err != nil { + return nil, err + } + var names []string + for _, fi := range rfi { + names = append(names, fi.Name()) + } + return names, nil +} + +func (f *UnionFile) Stat() (os.FileInfo, error) { + if f.layer != nil { + return f.layer.Stat() + } + if f.base != nil { + return f.base.Stat() + } + return nil, BADFD +} + +func (f *UnionFile) Sync() (err error) { + if f.layer != nil { + err = f.layer.Sync() + if err == nil && f.base != nil { + err = f.base.Sync() + } + return err + } + if f.base != nil { + return f.base.Sync() + } + return BADFD +} + +func (f *UnionFile) Truncate(s int64) (err error) { + if f.layer != nil { + err = f.layer.Truncate(s) + if err == nil && f.base != nil { + err = f.base.Truncate(s) + } + return err + } + if f.base != nil { + return f.base.Truncate(s) + } + return BADFD +} + +func (f *UnionFile) WriteString(s string) (n int, err error) { + if f.layer != nil { + n, err = f.layer.WriteString(s) + if err == nil && f.base != nil { + _, err = f.base.WriteString(s) + } + return n, err + } + if f.base != nil { + return f.base.WriteString(s) + } + return 0, BADFD +} + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + // First make sure the directory exists + exists, err := Exists(layer, filepath.Dir(name)) + if err != nil { + return err + } + if !exists { + err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? 
+ if err != nil { + return err + } + } + + // Create the file on the overlay + lfh, err := layer.Create(name) + if err != nil { + return err + } + n, err := io.Copy(lfh, bfh) + if err != nil { + // If anything fails, clean up the file + layer.Remove(name) + lfh.Close() + return err + } + + bfi, err := bfh.Stat() + if err != nil || bfi.Size() != n { + layer.Remove(name) + lfh.Close() + return syscall.EIO + } + + err = lfh.Close() + if err != nil { + layer.Remove(name) + lfh.Close() + return err + } + return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) +} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go new file mode 100644 index 00000000..4f253f48 --- /dev/null +++ b/vendor/github.com/spf13/afero/util.go @@ -0,0 +1,330 @@ +// Copyright ©2015 Steve Francia +// Portions Copyright ©2015 The Hugo Authors +// Portions Copyright 2016-present Bjørn Erik Pedersen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "unicode" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// Filepath separator defined by os.Separator. +const FilePathSeparator = string(filepath.Separator) + +// Takes a reader and a path and writes the content +func (a Afero) WriteReader(path string, r io.Reader) (err error) { + return WriteReader(a.Fs, path, r) +} + +func WriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + if err != os.ErrExist { + return err + } + } + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +// Same as WriteReader but checks to see if file/directory already exists. 
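The WriteReader helper above creates any missing parent directories before streaming the reader into a freshly created file, while the SafeWriteReader variant that follows refuses to overwrite an existing path. A minimal, illustrative sketch of driving both against an in-memory filesystem (the path and contents are made up):

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// Parent directories such as "tmp" are created on demand.
	if err := afero.WriteReader(fs, "tmp/example.txt", strings.NewReader("hello")); err != nil {
		fmt.Println("write failed:", err)
		return
	}

	// SafeWriteReader refuses to clobber the file written above.
	err := afero.SafeWriteReader(fs, "tmp/example.txt", strings.NewReader("again"))
	fmt.Println(err) // "tmp/example.txt already exists"
}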
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { + return SafeWriteReader(a.Fs, path, r) +} + +func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + return + } + } + + exists, err := Exists(fs, path) + if err != nil { + return + } + if exists { + return fmt.Errorf("%v already exists", path) + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +func (a Afero) GetTempDir(subPath string) string { + return GetTempDir(a.Fs, subPath) +} + +// GetTempDir returns the default temp directory with trailing slash +// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx +func GetTempDir(fs Fs, subPath string) string { + addSlash := func(p string) string { + if FilePathSeparator != p[len(p)-1:] { + p = p + FilePathSeparator + } + return p + } + dir := addSlash(os.TempDir()) + + if subPath != "" { + // preserve windows backslash :-( + if FilePathSeparator == "\\" { + subPath = strings.Replace(subPath, "\\", "____", -1) + } + dir = dir + UnicodeSanitize((subPath)) + if FilePathSeparator == "\\" { + dir = strings.Replace(dir, "____", "\\", -1) + } + + if exists, _ := Exists(fs, dir); exists { + return addSlash(dir) + } + + err := fs.MkdirAll(dir, 0777) + if err != nil { + panic(err) + } + dir = addSlash(dir) + } + return dir +} + +// Rewrite string to remove non-standard path characters +func UnicodeSanitize(s string) string { + source := []rune(s) + target := make([]rune, 0, len(source)) + + for _, r := range source { + if unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsMark(r) || + r == '.' || + r == '/' || + r == '\\' || + r == '_' || + r == '-' || + r == '%' || + r == ' ' || + r == '#' { + target = append(target, r) + } + } + + return string(target) +} + +// Transform characters with accents into plain forms. +func NeuterAccents(s string) string { + t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) + result, _, _ := transform.String(t, string(s)) + + return result +} + +func isMn(r rune) bool { + return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks +} + +func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { + return FileContainsBytes(a.Fs, filename, subslice) +} + +// Check if a file contains a specified byte slice. +func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { + f, err := fs.Open(filename) + if err != nil { + return false, err + } + defer f.Close() + + return readerContainsAny(f, subslice), nil +} + +func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { + return FileContainsAnyBytes(a.Fs, filename, subslices) +} + +// Check if a file contains any of the specified byte slices. +func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { + f, err := fs.Open(filename) + if err != nil { + return false, err + } + defer f.Close() + + return readerContainsAny(f, subslices...), nil +} + +// readerContains reports whether any of the subslices is within r. 
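FileContainsBytes above scans a file with the readerContainsAny helper that follows, and the Exists / DirExists / IsEmpty helpers defined a little further down answer the usual path questions without the caller touching os directly. A short sketch, assuming afero's WriteFile helper from its ioutil wrappers for setup:

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	_ = afero.WriteFile(fs, "notes/todo.txt", []byte("buy milk"), 0644)

	found, _ := afero.FileContainsBytes(fs, "notes/todo.txt", []byte("milk"))
	hasDir, _ := afero.DirExists(fs, "notes")
	fmt.Println(found, hasDir) // true true
}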
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool { + + if r == nil || len(subslices) == 0 { + return false + } + + largestSlice := 0 + + for _, sl := range subslices { + if len(sl) > largestSlice { + largestSlice = len(sl) + } + } + + if largestSlice == 0 { + return false + } + + bufflen := largestSlice * 4 + halflen := bufflen / 2 + buff := make([]byte, bufflen) + var err error + var n, i int + + for { + i++ + if i == 1 { + n, err = io.ReadAtLeast(r, buff[:halflen], halflen) + } else { + if i != 2 { + // shift left to catch overlapping matches + copy(buff[:], buff[halflen:]) + } + n, err = io.ReadAtLeast(r, buff[halflen:], halflen) + } + + if n > 0 { + for _, sl := range subslices { + if bytes.Contains(buff, sl) { + return true + } + } + } + + if err != nil { + break + } + } + return false +} + +func (a Afero) DirExists(path string) (bool, error) { + return DirExists(a.Fs, path) +} + +// DirExists checks if a path exists and is a directory. +func DirExists(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err == nil && fi.IsDir() { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func (a Afero) IsDir(path string) (bool, error) { + return IsDir(a.Fs, path) +} + +// IsDir checks if a given path is a directory. +func IsDir(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + return fi.IsDir(), nil +} + +func (a Afero) IsEmpty(path string) (bool, error) { + return IsEmpty(a.Fs, path) +} + +// IsEmpty checks if a given file or directory is empty. +func IsEmpty(fs Fs, path string) (bool, error) { + if b, _ := Exists(fs, path); !b { + return false, fmt.Errorf("%q path does not exist", path) + } + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + if fi.IsDir() { + f, err := fs.Open(path) + if err != nil { + return false, err + } + defer f.Close() + list, err := f.Readdir(-1) + return len(list) == 0, nil + } + return fi.Size() == 0, nil +} + +func (a Afero) Exists(path string) (bool, error) { + return Exists(a.Fs, path) +} + +// Check if a file or directory exists. +func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE new file mode 100644 index 00000000..4527efb9 --- /dev/null +++ b/vendor/github.com/spf13/cast/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go new file mode 100644 index 00000000..8b8c208b --- /dev/null +++ b/vendor/github.com/spf13/cast/cast.go @@ -0,0 +1,159 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +// Package cast provides easy and safe casting in Go. +package cast + +import "time" + +// ToBool casts an interface to a bool type. +func ToBool(i interface{}) bool { + v, _ := ToBoolE(i) + return v +} + +// ToTime casts an interface to a time.Time type. +func ToTime(i interface{}) time.Time { + v, _ := ToTimeE(i) + return v +} + +// ToDuration casts an interface to a time.Duration type. +func ToDuration(i interface{}) time.Duration { + v, _ := ToDurationE(i) + return v +} + +// ToFloat64 casts an interface to a float64 type. +func ToFloat64(i interface{}) float64 { + v, _ := ToFloat64E(i) + return v +} + +// ToFloat32 casts an interface to a float32 type. +func ToFloat32(i interface{}) float32 { + v, _ := ToFloat32E(i) + return v +} + +// ToInt64 casts an interface to an int64 type. +func ToInt64(i interface{}) int64 { + v, _ := ToInt64E(i) + return v +} + +// ToInt32 casts an interface to an int32 type. +func ToInt32(i interface{}) int32 { + v, _ := ToInt32E(i) + return v +} + +// ToInt16 casts an interface to an int16 type. +func ToInt16(i interface{}) int16 { + v, _ := ToInt16E(i) + return v +} + +// ToInt8 casts an interface to an int8 type. +func ToInt8(i interface{}) int8 { + v, _ := ToInt8E(i) + return v +} + +// ToInt casts an interface to an int type. +func ToInt(i interface{}) int { + v, _ := ToIntE(i) + return v +} + +// ToUint casts an interface to a uint type. +func ToUint(i interface{}) uint { + v, _ := ToUintE(i) + return v +} + +// ToUint64 casts an interface to a uint64 type. +func ToUint64(i interface{}) uint64 { + v, _ := ToUint64E(i) + return v +} + +// ToUint32 casts an interface to a uint32 type. +func ToUint32(i interface{}) uint32 { + v, _ := ToUint32E(i) + return v +} + +// ToUint16 casts an interface to a uint16 type. +func ToUint16(i interface{}) uint16 { + v, _ := ToUint16E(i) + return v +} + +// ToUint8 casts an interface to a uint8 type. +func ToUint8(i interface{}) uint8 { + v, _ := ToUint8E(i) + return v +} + +// ToString casts an interface to a string type. +func ToString(i interface{}) string { + v, _ := ToStringE(i) + return v +} + +// ToStringMapString casts an interface to a map[string]string type. +func ToStringMapString(i interface{}) map[string]string { + v, _ := ToStringMapStringE(i) + return v +} + +// ToStringMapStringSlice casts an interface to a map[string][]string type. +func ToStringMapStringSlice(i interface{}) map[string][]string { + v, _ := ToStringMapStringSliceE(i) + return v +} + +// ToStringMapBool casts an interface to a map[string]bool type. 
+func ToStringMapBool(i interface{}) map[string]bool { + v, _ := ToStringMapBoolE(i) + return v +} + +// ToStringMap casts an interface to a map[string]interface{} type. +func ToStringMap(i interface{}) map[string]interface{} { + v, _ := ToStringMapE(i) + return v +} + +// ToSlice casts an interface to a []interface{} type. +func ToSlice(i interface{}) []interface{} { + v, _ := ToSliceE(i) + return v +} + +// ToBoolSlice casts an interface to a []bool type. +func ToBoolSlice(i interface{}) []bool { + v, _ := ToBoolSliceE(i) + return v +} + +// ToStringSlice casts an interface to a []string type. +func ToStringSlice(i interface{}) []string { + v, _ := ToStringSliceE(i) + return v +} + +// ToIntSlice casts an interface to a []int type. +func ToIntSlice(i interface{}) []int { + v, _ := ToIntSliceE(i) + return v +} + +// ToDurationSlice casts an interface to a []time.Duration type. +func ToDurationSlice(i interface{}) []time.Duration { + v, _ := ToDurationSliceE(i) + return v +} diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go new file mode 100644 index 00000000..4fe19289 --- /dev/null +++ b/vendor/github.com/spf13/cast/caste.go @@ -0,0 +1,1166 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "errors" + "fmt" + "html/template" + "reflect" + "strconv" + "strings" + "time" +) + +var errNegativeNotAllowed = errors.New("unable to cast negative value") + +// ToTimeE casts an interface to a time.Time type. +func ToTimeE(i interface{}) (tim time.Time, err error) { + i = indirect(i) + + switch v := i.(type) { + case time.Time: + return v, nil + case string: + return StringToDate(v) + case int: + return time.Unix(int64(v), 0), nil + case int64: + return time.Unix(v, 0), nil + case int32: + return time.Unix(int64(v), 0), nil + case uint: + return time.Unix(int64(v), 0), nil + case uint64: + return time.Unix(int64(v), 0), nil + case uint32: + return time.Unix(int64(v), 0), nil + default: + return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) + } +} + +// ToDurationE casts an interface to a time.Duration type. +func ToDurationE(i interface{}) (d time.Duration, err error) { + i = indirect(i) + + switch s := i.(type) { + case time.Duration: + return s, nil + case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: + d = time.Duration(ToInt64(s)) + return + case float32, float64: + d = time.Duration(ToFloat64(s)) + return + case string: + if strings.ContainsAny(s, "nsuµmh") { + d, err = time.ParseDuration(s) + } else { + d, err = time.ParseDuration(s + "ns") + } + return + default: + err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) + return + } +} + +// ToBoolE casts an interface to a bool type. +func ToBoolE(i interface{}) (bool, error) { + i = indirect(i) + + switch b := i.(type) { + case bool: + return b, nil + case nil: + return false, nil + case int: + if i.(int) != 0 { + return true, nil + } + return false, nil + case string: + return strconv.ParseBool(i.(string)) + default: + return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) + } +} + +// ToFloat64E casts an interface to a float64 type. 
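Each exported cast.To* helper above is a one-line wrapper that discards the error from its To*E counterpart implemented in caste.go, so callers choose between a silent zero value and an explicit error. A brief sketch of that pairing (input values are illustrative):

package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	fmt.Println(cast.ToInt("8"))     // 8
	fmt.Println(cast.ToInt("eight")) // 0, the parse error is dropped

	if _, err := cast.ToIntE("eight"); err != nil {
		fmt.Println(err) // unable to cast "eight" of type string to int
	}

	fmt.Println(cast.ToDuration("150ms")) // 150ms
}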
+func ToFloat64E(i interface{}) (float64, error) { + i = indirect(i) + + switch s := i.(type) { + case float64: + return s, nil + case float32: + return float64(s), nil + case int: + return float64(s), nil + case int64: + return float64(s), nil + case int32: + return float64(s), nil + case int16: + return float64(s), nil + case int8: + return float64(s), nil + case uint: + return float64(s), nil + case uint64: + return float64(s), nil + case uint32: + return float64(s), nil + case uint16: + return float64(s), nil + case uint8: + return float64(s), nil + case string: + v, err := strconv.ParseFloat(s, 64) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + } +} + +// ToFloat32E casts an interface to a float32 type. +func ToFloat32E(i interface{}) (float32, error) { + i = indirect(i) + + switch s := i.(type) { + case float64: + return float32(s), nil + case float32: + return s, nil + case int: + return float32(s), nil + case int64: + return float32(s), nil + case int32: + return float32(s), nil + case int16: + return float32(s), nil + case int8: + return float32(s), nil + case uint: + return float32(s), nil + case uint64: + return float32(s), nil + case uint32: + return float32(s), nil + case uint16: + return float32(s), nil + case uint8: + return float32(s), nil + case string: + v, err := strconv.ParseFloat(s, 32) + if err == nil { + return float32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + } +} + +// ToInt64E casts an interface to an int64 type. +func ToInt64E(i interface{}) (int64, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int64(s), nil + case int64: + return s, nil + case int32: + return int64(s), nil + case int16: + return int64(s), nil + case int8: + return int64(s), nil + case uint: + return int64(s), nil + case uint64: + return int64(s), nil + case uint32: + return int64(s), nil + case uint16: + return int64(s), nil + case uint8: + return int64(s), nil + case float64: + return int64(s), nil + case float32: + return int64(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + } +} + +// ToInt32E casts an interface to an int32 type. 
+func ToInt32E(i interface{}) (int32, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int32(s), nil + case int64: + return int32(s), nil + case int32: + return s, nil + case int16: + return int32(s), nil + case int8: + return int32(s), nil + case uint: + return int32(s), nil + case uint64: + return int32(s), nil + case uint32: + return int32(s), nil + case uint16: + return int32(s), nil + case uint8: + return int32(s), nil + case float64: + return int32(s), nil + case float32: + return int32(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + } +} + +// ToInt16E casts an interface to an int16 type. +func ToInt16E(i interface{}) (int16, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int16(s), nil + case int64: + return int16(s), nil + case int32: + return int16(s), nil + case int16: + return s, nil + case int8: + return int16(s), nil + case uint: + return int16(s), nil + case uint64: + return int16(s), nil + case uint32: + return int16(s), nil + case uint16: + return int16(s), nil + case uint8: + return int16(s), nil + case float64: + return int16(s), nil + case float32: + return int16(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int16(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + } +} + +// ToInt8E casts an interface to an int8 type. +func ToInt8E(i interface{}) (int8, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int8(s), nil + case int64: + return int8(s), nil + case int32: + return int8(s), nil + case int16: + return int8(s), nil + case int8: + return s, nil + case uint: + return int8(s), nil + case uint64: + return int8(s), nil + case uint32: + return int8(s), nil + case uint16: + return int8(s), nil + case uint8: + return int8(s), nil + case float64: + return int8(s), nil + case float32: + return int8(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + } +} + +// ToIntE casts an interface to an int type. 
+func ToIntE(i interface{}) (int, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return s, nil + case int64: + return int(s), nil + case int32: + return int(s), nil + case int16: + return int(s), nil + case int8: + return int(s), nil + case uint: + return int(s), nil + case uint64: + return int(s), nil + case uint32: + return int(s), nil + case uint16: + return int(s), nil + case uint8: + return int(s), nil + case float64: + return int(s), nil + case float32: + return int(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) + } +} + +// ToUintE casts an interface to a uint type. +func ToUintE(i interface{}) (uint, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 0) + if err == nil { + return uint(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case uint: + return s, nil + case uint64: + return uint(s), nil + case uint32: + return uint(s), nil + case uint16: + return uint(s), nil + case uint8: + return uint(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) + } +} + +// ToUint64E casts an interface to a uint64 type. +func ToUint64E(i interface{}) (uint64, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 64) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case uint: + return uint64(s), nil + case uint64: + return s, nil + case uint32: + return uint64(s), nil + case uint16: + return uint64(s), nil + case uint8: + return uint64(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) + } +} + +// ToUint32E casts an interface to a uint32 type. 
+func ToUint32E(i interface{}) (uint32, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 32) + if err == nil { + return uint32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case uint: + return uint32(s), nil + case uint64: + return uint32(s), nil + case uint32: + return s, nil + case uint16: + return uint32(s), nil + case uint8: + return uint32(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) + } +} + +// ToUint16E casts an interface to a uint16 type. +func ToUint16E(i interface{}) (uint16, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 16) + if err == nil { + return uint16(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case uint: + return uint16(s), nil + case uint64: + return uint16(s), nil + case uint32: + return uint16(s), nil + case uint16: + return s, nil + case uint8: + return uint16(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) + } +} + +// ToUint8E casts an interface to a uint type. 
+func ToUint8E(i interface{}) (uint8, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 8) + if err == nil { + return uint8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case uint: + return uint8(s), nil + case uint64: + return uint8(s), nil + case uint32: + return uint8(s), nil + case uint16: + return uint8(s), nil + case uint8: + return s, nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) + } +} + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirect returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil). +func indirect(a interface{}) interface{} { + if a == nil { + return nil + } + if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { + // Avoid creating a reflect.Value if it's not a pointer. + return a + } + v := reflect.ValueOf(a) + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + return v.Interface() +} + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirectToStringerOrError returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer +// or error, +func indirectToStringerOrError(a interface{}) interface{} { + if a == nil { + return nil + } + + var errorType = reflect.TypeOf((*error)(nil)).Elem() + var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + + v := reflect.ValueOf(a) + for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + return v.Interface() +} + +// ToStringE casts an interface to a string type. 
+func ToStringE(i interface{}) (string, error) { + i = indirectToStringerOrError(i) + + switch s := i.(type) { + case string: + return s, nil + case bool: + return strconv.FormatBool(s), nil + case float64: + return strconv.FormatFloat(s, 'f', -1, 64), nil + case float32: + return strconv.FormatFloat(float64(s), 'f', -1, 32), nil + case int: + return strconv.Itoa(s), nil + case int64: + return strconv.FormatInt(s, 10), nil + case int32: + return strconv.Itoa(int(s)), nil + case int16: + return strconv.FormatInt(int64(s), 10), nil + case int8: + return strconv.FormatInt(int64(s), 10), nil + case uint: + return strconv.FormatInt(int64(s), 10), nil + case uint64: + return strconv.FormatInt(int64(s), 10), nil + case uint32: + return strconv.FormatInt(int64(s), 10), nil + case uint16: + return strconv.FormatInt(int64(s), 10), nil + case uint8: + return strconv.FormatInt(int64(s), 10), nil + case []byte: + return string(s), nil + case template.HTML: + return string(s), nil + case template.URL: + return string(s), nil + case template.JS: + return string(s), nil + case template.CSS: + return string(s), nil + case template.HTMLAttr: + return string(s), nil + case nil: + return "", nil + case fmt.Stringer: + return s.String(), nil + case error: + return s.Error(), nil + default: + return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) + } +} + +// ToStringMapStringE casts an interface to a map[string]string type. +func ToStringMapStringE(i interface{}) (map[string]string, error) { + var m = map[string]string{} + + switch v := i.(type) { + case map[string]string: + return v, nil + case map[string]interface{}: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case map[interface{}]string: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) + } +} + +// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { + var m = map[string][]string{} + + switch v := i.(type) { + case map[string][]string: + return v, nil + case map[string][]interface{}: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[string]string: + for k, val := range v { + m[ToString(k)] = []string{val} + } + case map[string]interface{}: + for k, val := range v { + switch vt := val.(type) { + case []interface{}: + m[ToString(k)] = ToStringSlice(vt) + case []string: + m[ToString(k)] = vt + default: + m[ToString(k)] = []string{ToString(val)} + } + } + return m, nil + case map[interface{}][]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}][]interface{}: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}]interface{}: + for k, val := range v { + key, err := ToStringE(k) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + value, err := ToStringSliceE(val) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + m[key] = value + } + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + return m, nil +} + +// ToStringMapBoolE casts an interface to a map[string]bool type. +func ToStringMapBoolE(i interface{}) (map[string]bool, error) { + var m = map[string]bool{} + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToBool(val) + } + return m, nil + case map[string]interface{}: + for k, val := range v { + m[ToString(k)] = ToBool(val) + } + return m, nil + case map[string]bool: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) + } +} + +// ToStringMapE casts an interface to a map[string]interface{} type. +func ToStringMapE(i interface{}) (map[string]interface{}, error) { + var m = map[string]interface{}{} + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = val + } + return m, nil + case map[string]interface{}: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) + } +} + +// ToSliceE casts an interface to a []interface{} type. +func ToSliceE(i interface{}) ([]interface{}, error) { + var s []interface{} + + switch v := i.(type) { + case []interface{}: + return append(s, v...), nil + case []map[string]interface{}: + for _, u := range v { + s = append(s, u) + } + return s, nil + default: + return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) + } +} + +// ToBoolSliceE casts an interface to a []bool type. 
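Before the slice converters that follow, note that the map converters above are what make it painless to consume configuration decoded from YAML or JSON, where keys and values typically arrive as interface{}. A small, assumed sketch:

package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// YAML decoders commonly produce map[interface{}]interface{} values.
	raw := map[interface{}]interface{}{"debug": "true", "name": "demo"}

	flags := cast.ToStringMapBool(raw)    // "debug" -> true, "name" -> false
	fields := cast.ToStringMapString(raw) // "debug" -> "true", "name" -> "demo"

	fmt.Println(flags["debug"], fields["name"]) // true demo
}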
+func ToBoolSliceE(i interface{}) ([]bool, error) { + if i == nil { + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } + + switch v := i.(type) { + case []bool: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]bool, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToBoolE(s.Index(j).Interface()) + if err != nil { + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } + a[j] = val + } + return a, nil + default: + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } +} + +// ToStringSliceE casts an interface to a []string type. +func ToStringSliceE(i interface{}) ([]string, error) { + var a []string + + switch v := i.(type) { + case []interface{}: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []string: + return v, nil + case string: + return strings.Fields(v), nil + case interface{}: + str, err := ToStringE(v) + if err != nil { + return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) + } + return []string{str}, nil + default: + return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) + } +} + +// ToIntSliceE casts an interface to a []int type. +func ToIntSliceE(i interface{}) ([]int, error) { + if i == nil { + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } + + switch v := i.(type) { + case []int: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]int, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToIntE(s.Index(j).Interface()) + if err != nil { + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } + a[j] = val + } + return a, nil + default: + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } +} + +// ToDurationSliceE casts an interface to a []time.Duration type. +func ToDurationSliceE(i interface{}) ([]time.Duration, error) { + if i == nil { + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } + + switch v := i.(type) { + case []time.Duration: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]time.Duration, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToDurationE(s.Index(j).Interface()) + if err != nil { + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } + a[j] = val + } + return a, nil + default: + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } +} + +// StringToDate attempts to parse a string into a time.Time type using a +// predefined list of formats. If no suitable format is found, an error is +// returned. 
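Because StringToDate (implemented just below) simply walks a fixed list of layouts, ToTime and ToTimeE accept most common textual timestamps without the caller naming a format. A hedged sketch:

package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	t, err := cast.StringToDate("2018-03-04")
	fmt.Println(t.Year(), err) // 2018 <nil>

	// ToTimeE falls back to the same layout list for string input.
	if _, err := cast.ToTimeE("not a date"); err != nil {
		fmt.Println(err) // unable to parse date: not a date
	}
}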
+func StringToDate(s string) (time.Time, error) { + return parseDateWith(s, []string{ + time.RFC3339, + "2006-01-02T15:04:05", // iso8601 without timezone + time.RFC1123Z, + time.RFC1123, + time.RFC822Z, + time.RFC822, + time.RFC850, + time.ANSIC, + time.UnixDate, + time.RubyDate, + "2006-01-02 15:04:05.999999999 -0700 MST", // Time.String() + "2006-01-02", + "02 Jan 2006", + "2006-01-02 15:04:05 -07:00", + "2006-01-02 15:04:05 -0700", + "2006-01-02 15:04:05Z07:00", // RFC3339 without T + "2006-01-02 15:04:05", + time.Kitchen, + time.Stamp, + time.StampMilli, + time.StampMicro, + time.StampNano, + }) +} + +func parseDateWith(s string, dates []string) (d time.Time, e error) { + for _, dateType := range dates { + if d, e = time.Parse(dateType, s); e == nil { + return + } + } + return d, fmt.Errorf("unable to parse date: %s", s) +} + +// jsonStringToObject attempts to unmarshall a string as JSON into +// the object passed as pointer. +func jsonStringToObject(s string, v interface{}) error { + data := []byte(s) + return json.Unmarshal(data, v) +} diff --git a/vendor/github.com/spf13/jwalterweatherman/LICENSE b/vendor/github.com/spf13/jwalterweatherman/LICENSE new file mode 100644 index 00000000..4527efb9 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go new file mode 100644 index 00000000..bcb76340 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go @@ -0,0 +1,113 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. 
+ +package jwalterweatherman + +import ( + "io" + "io/ioutil" + "log" + "os" +) + +var ( + TRACE *log.Logger + DEBUG *log.Logger + INFO *log.Logger + WARN *log.Logger + ERROR *log.Logger + CRITICAL *log.Logger + FATAL *log.Logger + + LOG *log.Logger + FEEDBACK *Feedback + + defaultNotepad *Notepad +) + +func reloadDefaultNotepad() { + TRACE = defaultNotepad.TRACE + DEBUG = defaultNotepad.DEBUG + INFO = defaultNotepad.INFO + WARN = defaultNotepad.WARN + ERROR = defaultNotepad.ERROR + CRITICAL = defaultNotepad.CRITICAL + FATAL = defaultNotepad.FATAL + + LOG = defaultNotepad.LOG + FEEDBACK = defaultNotepad.FEEDBACK +} + +func init() { + defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime) + reloadDefaultNotepad() +} + +// SetLogThreshold set the log threshold for the default notepad. Trace by default. +func SetLogThreshold(threshold Threshold) { + defaultNotepad.SetLogThreshold(threshold) + reloadDefaultNotepad() +} + +// SetLogOutput set the log output for the default notepad. Discarded by default. +func SetLogOutput(handle io.Writer) { + defaultNotepad.SetLogOutput(handle) + reloadDefaultNotepad() +} + +// SetStdoutThreshold set the standard output threshold for the default notepad. +// Info by default. +func SetStdoutThreshold(threshold Threshold) { + defaultNotepad.SetStdoutThreshold(threshold) + reloadDefaultNotepad() +} + +// SetPrefix set the prefix for the default logger. Empty by default. +func SetPrefix(prefix string) { + defaultNotepad.SetPrefix(prefix) + reloadDefaultNotepad() +} + +// SetFlags set the flags for the default logger. "log.Ldate | log.Ltime" by default. +func SetFlags(flags int) { + defaultNotepad.SetFlags(flags) + reloadDefaultNotepad() +} + +// Level returns the current global log threshold. +func LogThreshold() Threshold { + return defaultNotepad.logThreshold +} + +// Level returns the current global output threshold. +func StdoutThreshold() Threshold { + return defaultNotepad.stdoutThreshold +} + +// GetStdoutThreshold returns the defined Treshold for the log logger. +func GetLogThreshold() Threshold { + return defaultNotepad.GetLogThreshold() +} + +// GetStdoutThreshold returns the Treshold for the stdout logger. +func GetStdoutThreshold() Threshold { + return defaultNotepad.GetStdoutThreshold() +} + +// LogCountForLevel returns the number of log invocations for a given threshold. +func LogCountForLevel(l Threshold) uint64 { + return defaultNotepad.LogCountForLevel(l) +} + +// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations +// greater than or equal to a given threshold. +func LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 { + return defaultNotepad.LogCountForLevelsGreaterThanorEqualTo(threshold) +} + +// ResetLogCounters resets the invocation counters for all levels. +func ResetLogCounters() { + defaultNotepad.ResetLogCounters() +} diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go new file mode 100644 index 00000000..11423ac4 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/log_counter.go @@ -0,0 +1,55 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. 
+ +package jwalterweatherman + +import ( + "sync/atomic" +) + +type logCounter struct { + counter uint64 +} + +func (c *logCounter) incr() { + atomic.AddUint64(&c.counter, 1) +} + +func (c *logCounter) resetCounter() { + atomic.StoreUint64(&c.counter, 0) +} + +func (c *logCounter) getCount() uint64 { + return atomic.LoadUint64(&c.counter) +} + +func (c *logCounter) Write(p []byte) (n int, err error) { + c.incr() + return len(p), nil +} + +// LogCountForLevel returns the number of log invocations for a given threshold. +func (n *Notepad) LogCountForLevel(l Threshold) uint64 { + return n.logCounters[l].getCount() +} + +// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations +// greater than or equal to a given threshold. +func (n *Notepad) LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 { + var cnt uint64 + + for i := int(threshold); i < len(n.logCounters); i++ { + cnt += n.LogCountForLevel(Threshold(i)) + } + + return cnt +} + +// ResetLogCounters resets the invocation counters for all levels. +func (n *Notepad) ResetLogCounters() { + for _, np := range n.logCounters { + np.resetCounter() + } +} diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go new file mode 100644 index 00000000..ae5aaf71 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go @@ -0,0 +1,194 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package jwalterweatherman + +import ( + "fmt" + "io" + "log" +) + +type Threshold int + +func (t Threshold) String() string { + return prefixes[t] +} + +const ( + LevelTrace Threshold = iota + LevelDebug + LevelInfo + LevelWarn + LevelError + LevelCritical + LevelFatal +) + +var prefixes map[Threshold]string = map[Threshold]string{ + LevelTrace: "TRACE", + LevelDebug: "DEBUG", + LevelInfo: "INFO", + LevelWarn: "WARN", + LevelError: "ERROR", + LevelCritical: "CRITICAL", + LevelFatal: "FATAL", +} + +// Notepad is where you leave a note! +type Notepad struct { + TRACE *log.Logger + DEBUG *log.Logger + INFO *log.Logger + WARN *log.Logger + ERROR *log.Logger + CRITICAL *log.Logger + FATAL *log.Logger + + LOG *log.Logger + FEEDBACK *Feedback + + loggers [7]**log.Logger + logHandle io.Writer + outHandle io.Writer + logThreshold Threshold + stdoutThreshold Threshold + prefix string + flags int + + // One per Threshold + logCounters [7]*logCounter +} + +// NewNotepad create a new notepad. +func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad { + n := &Notepad{} + + n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL} + n.outHandle = outHandle + n.logHandle = logHandle + n.stdoutThreshold = outThreshold + n.logThreshold = logThreshold + + if len(prefix) != 0 { + n.prefix = "[" + prefix + "] " + } else { + n.prefix = "" + } + + n.flags = flags + + n.LOG = log.New(n.logHandle, + "LOG: ", + n.flags) + n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG} + + n.init() + return n +} + +// init creates the loggers for each level depending on the notepad thresholds. 
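A Notepad routes each threshold level to stdout, the log writer, both, or only its internal counter; that routing is exactly what the init method below derives from the two thresholds passed to NewNotepad. A minimal sketch of constructing one (the buffer stands in for a log file):

package main

import (
	"bytes"
	"log"
	"os"

	jww "github.com/spf13/jwalterweatherman"
)

func main() {
	var logBuf bytes.Buffer

	// stdout shows WARN and above; the buffer records everything from INFO up.
	n := jww.NewNotepad(jww.LevelWarn, jww.LevelInfo, os.Stdout, &logBuf, "demo", log.Ldate|log.Ltime)

	n.INFO.Println("recorded in the log buffer only")
	n.ERROR.Println("recorded in both destinations")
	n.FEEDBACK.Println("printed plainly for the user and mirrored to the log")
}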
+func (n *Notepad) init() { + logAndOut := io.MultiWriter(n.outHandle, n.logHandle) + + for t, logger := range n.loggers { + threshold := Threshold(t) + counter := &logCounter{} + n.logCounters[t] = counter + prefix := n.prefix + threshold.String() + " " + + switch { + case threshold >= n.logThreshold && threshold >= n.stdoutThreshold: + *logger = log.New(io.MultiWriter(counter, logAndOut), prefix, n.flags) + + case threshold >= n.logThreshold: + *logger = log.New(io.MultiWriter(counter, n.logHandle), prefix, n.flags) + + case threshold >= n.stdoutThreshold: + *logger = log.New(io.MultiWriter(counter, n.outHandle), prefix, n.flags) + + default: + // counter doesn't care about prefix and flags, so don't use them + // for performance. + *logger = log.New(counter, "", 0) + } + } +} + +// SetLogThreshold changes the threshold above which messages are written to the +// log file. +func (n *Notepad) SetLogThreshold(threshold Threshold) { + n.logThreshold = threshold + n.init() +} + +// SetLogOutput changes the file where log messages are written. +func (n *Notepad) SetLogOutput(handle io.Writer) { + n.logHandle = handle + n.init() +} + +// GetStdoutThreshold returns the defined Treshold for the log logger. +func (n *Notepad) GetLogThreshold() Threshold { + return n.logThreshold +} + +// SetStdoutThreshold changes the threshold above which messages are written to the +// standard output. +func (n *Notepad) SetStdoutThreshold(threshold Threshold) { + n.stdoutThreshold = threshold + n.init() +} + +// GetStdoutThreshold returns the Treshold for the stdout logger. +func (n *Notepad) GetStdoutThreshold() Threshold { + return n.stdoutThreshold +} + +// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between +// brackets at the beginning of the line. An empty prefix won't be displayed at all. +func (n *Notepad) SetPrefix(prefix string) { + if len(prefix) != 0 { + n.prefix = "[" + prefix + "] " + } else { + n.prefix = "" + } + n.init() +} + +// SetFlags choose which flags the logger will display (after prefix and message +// level). See the package log for more informations on this. +func (n *Notepad) SetFlags(flags int) { + n.flags = flags + n.init() +} + +// Feedback writes plainly to the outHandle while +// logging with the standard extra information (date, file, etc). +type Feedback struct { + out *log.Logger + log *log.Logger +} + +func (fb *Feedback) Println(v ...interface{}) { + fb.output(fmt.Sprintln(v...)) +} + +func (fb *Feedback) Printf(format string, v ...interface{}) { + fb.output(fmt.Sprintf(format, v...)) +} + +func (fb *Feedback) Print(v ...interface{}) { + fb.output(fmt.Sprint(v...)) +} + +func (fb *Feedback) output(s string) { + if fb.out != nil { + fb.out.Output(2, s) + } + if fb.log != nil { + fb.log.Output(2, s) + } +} diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE new file mode 100644 index 00000000..63ed1cfe --- /dev/null +++ b/vendor/github.com/spf13/pflag/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go new file mode 100644 index 00000000..c4c5c0bf --- /dev/null +++ b/vendor/github.com/spf13/pflag/bool.go @@ -0,0 +1,94 @@ +package pflag + +import "strconv" + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Type() string { + return "bool" +} + +func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +func boolConv(sval string) (interface{}, error) { + return strconv.ParseBool(sval) +} + +// GetBool return the bool value of a flag with the given name +func (f *FlagSet) GetBool(name string) (bool, error) { + val, err := f.getFlagType(name, "bool", boolConv) + if err != nil { + return false, err + } + return val.(bool), nil +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.BoolVarP(p, name, "", value, usage) +} + +// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, name string, value bool, usage string) { + BoolVarP(p, name, "", value, usage) +} + +// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. 
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + return f.BoolP(name, "", value, usage) +} + +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { + p := new(bool) + f.BoolVarP(p, name, shorthand, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return BoolP(name, "", value, usage) +} + +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. +func BoolP(name, shorthand string, value bool, usage string) *bool { + b := CommandLine.BoolP(name, shorthand, value, usage) + return b +} diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go new file mode 100644 index 00000000..5af02f1a --- /dev/null +++ b/vendor/github.com/spf13/pflag/bool_slice.go @@ -0,0 +1,147 @@ +package pflag + +import ( + "io" + "strconv" + "strings" +) + +// -- boolSlice Value +type boolSliceValue struct { + value *[]bool + changed bool +} + +func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { + bsv := new(boolSliceValue) + bsv.value = p + *bsv.value = val + return bsv +} + +// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. +// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. +func (s *boolSliceValue) Set(val string) error { + + // remove all quote characters + rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") + + // read flag arguments with CSV parser + boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) + if err != nil && err != io.EOF { + return err + } + + // parse boolean values into slice + out := make([]bool, 0, len(boolStrSlice)) + for _, boolStr := range boolStrSlice { + b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) + if err != nil { + return err + } + out = append(out, b) + } + + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + + s.changed = true + + return nil +} + +// Type returns a string that uniquely represents this flag's type. +func (s *boolSliceValue) Type() string { + return "boolSlice" +} + +// String defines a "native" format for this boolean slice flag value. 
+func (s *boolSliceValue) String() string { + + boolStrSlice := make([]string, len(*s.value)) + for i, b := range *s.value { + boolStrSlice[i] = strconv.FormatBool(b) + } + + out, _ := writeAsCSV(boolStrSlice) + + return "[" + out + "]" +} + +func boolSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []bool{}, nil + } + ss := strings.Split(val, ",") + out := make([]bool, len(ss)) + for i, t := range ss { + var err error + out[i], err = strconv.ParseBool(t) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetBoolSlice returns the []bool value of a flag with the given name. +func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { + val, err := f.getFlagType(name, "boolSlice", boolSliceConv) + if err != nil { + return []bool{}, err + } + return val.([]bool), nil +} + +// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. +// The argument p points to a []bool variable in which to store the value of the flag. +func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { + f.VarP(newBoolSliceValue(value, p), name, "", usage) +} + +// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { + f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) +} + +// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. +// The argument p points to a []bool variable in which to store the value of the flag. +func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { + CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) +} + +// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. +func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { + CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) +} + +// BoolSlice defines a []bool flag with specified name, default value, and usage string. +// The return value is the address of a []bool variable that stores the value of the flag. +func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { + p := []bool{} + f.BoolSliceVarP(&p, name, "", value, usage) + return &p +} + +// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { + p := []bool{} + f.BoolSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// BoolSlice defines a []bool flag with specified name, default value, and usage string. +// The return value is the address of a []bool variable that stores the value of the flag. +func BoolSlice(name string, value []bool, usage string) *[]bool { + return CommandLine.BoolSliceP(name, "", value, usage) +} + +// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. 
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+	return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 00000000..aa126e44
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+	*p = val
+	return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+	// "+1" means that no specific value was passed, so increment
+	if s == "+1" {
+		*i = countValue(*i + 1)
+		return nil
+	}
+	v, err := strconv.ParseInt(s, 0, 0)
+	*i = countValue(v)
+	return err
+}
+
+func (i *countValue) Type() string {
+	return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+	i, err := strconv.Atoi(sval)
+	if err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+	val, err := f.getFlagType(name, "count", countConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+	f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, only it takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+	flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+	flag.NoOptDefVal = "+1"
+}
+
+// CountVar is like CountVar, only the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+	CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, only it takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+	CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, "", usage)
+	return p
+}
+
+// CountP is like Count, only it takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, shorthand, usage)
+	return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func Count(name string, usage string) *int {
+	return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count, only it takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int { + return CommandLine.CountP(name, shorthand, usage) +} diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go new file mode 100644 index 00000000..e9debef8 --- /dev/null +++ b/vendor/github.com/spf13/pflag/duration.go @@ -0,0 +1,86 @@ +package pflag + +import ( + "time" +) + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Type() string { + return "duration" +} + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +func durationConv(sval string) (interface{}, error) { + return time.ParseDuration(sval) +} + +// GetDuration return the duration value of a flag with the given name +func (f *FlagSet) GetDuration(name string) (time.Duration, error) { + val, err := f.getFlagType(name, "duration", durationConv) + if err != nil { + return 0, err + } + return val.(time.Duration), nil +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, "", usage) +} + +// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, "", usage) +} + +// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, "", value, usage) + return p +} + +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, shorthand, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. 
+func Duration(name string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, "", value, usage) +} + +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. +func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go new file mode 100644 index 00000000..52c6b6dc --- /dev/null +++ b/vendor/github.com/spf13/pflag/duration_slice.go @@ -0,0 +1,128 @@ +package pflag + +import ( + "fmt" + "strings" + "time" +) + +// -- durationSlice Value +type durationSliceValue struct { + value *[]time.Duration + changed bool +} + +func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue { + dsv := new(durationSliceValue) + dsv.value = p + *dsv.value = val + return dsv +} + +func (s *durationSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]time.Duration, len(ss)) + for i, d := range ss { + var err error + out[i], err = time.ParseDuration(d) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *durationSliceValue) Type() string { + return "durationSlice" +} + +func (s *durationSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%s", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func durationSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []time.Duration{}, nil + } + ss := strings.Split(val, ",") + out := make([]time.Duration, len(ss)) + for i, d := range ss { + var err error + out[i], err = time.ParseDuration(d) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetDurationSlice returns the []time.Duration value of a flag with the given name +func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) { + val, err := f.getFlagType(name, "durationSlice", durationSliceConv) + if err != nil { + return []time.Duration{}, err + } + return val.([]time.Duration), nil +} + +// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string. +// The argument p points to a []time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { + f.VarP(newDurationSliceValue(value, p), name, "", usage) +} + +// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { + f.VarP(newDurationSliceValue(value, p), name, shorthand, usage) +} + +// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string. +// The argument p points to a duration[] variable in which to store the value of the flag. +func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { + CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage) +} + +// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. 
+func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { + CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage) +} + +// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a []time.Duration variable that stores the value of the flag. +func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { + p := []time.Duration{} + f.DurationSliceVarP(&p, name, "", value, usage) + return &p +} + +// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { + p := []time.Duration{} + f.DurationSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a []time.Duration variable that stores the value of the flag. +func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { + return CommandLine.DurationSliceP(name, "", value, usage) +} + +// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. +func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { + return CommandLine.DurationSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go new file mode 100644 index 00000000..28538c07 --- /dev/null +++ b/vendor/github.com/spf13/pflag/flag.go @@ -0,0 +1,1157 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. + +pflag is compatible with the GNU extensions to the POSIX recommendations +for command-line options. See +http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +Usage: + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. + + import flag "github.com/spf13/pflag" + +There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") +If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int + func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") + } +Or you can create custom flags that satisfy the Value interface (with +pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") +For such flags, the default value is just the initial value of the variable. + +After all flags are defined, call + flag.Parse() +to parse the command line into the defined flags. + +Flags may then be used directly. 
If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+	fmt.Println("ip has value ", *ip)
+	fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+	var ip = flag.IntP("flagname", "f", 1234, "help message")
+	var flagvar bool
+	func init() {
+		flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+	}
+	flag.VarP(&flagVal, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+	--flag // boolean flags only
+	--flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+	// boolean flags
+	-f
+	-abc
+	// non-boolean flags
+	-n 1234
+	-Ifile
+	// mixed
+	-abcs "hello"
+	-abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+	// ContinueOnError will return an err from Parse() if an error is found
+	ContinueOnError ErrorHandling = iota
+	// ExitOnError will call os.Exit(2) if an error is found when parsing
+	ExitOnError
+	// PanicOnError will panic() if an error is found when parsing flags
+	PanicOnError
+)
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+	// Usage is the function called when an error occurs while parsing flags.
+	// The field is a function (not a method) that may be changed to point to
+	// a custom error handler.
+	Usage func()
+
+	// SortFlags is used to indicate whether the user wants to have sorted flags in
+	// help/usage messages.
+	SortFlags bool
+
+	name              string
+	parsed            bool
+	actual            map[NormalizedName]*Flag
+	orderedActual     []*Flag
+	sortedActual      []*Flag
+	formal            map[NormalizedName]*Flag
+	orderedFormal     []*Flag
+	sortedFormal      []*Flag
+	shorthands        map[byte]*Flag
+	args              []string // arguments after flags
+	argsLenAtDash     int // len(args) when a '--' was located when parsing, or -1 if no --
+	errorHandling     ErrorHandling
+	output            io.Writer // nil means stderr; use out() accessor
+	interspersed      bool // allow interspersed option/non-option args
+	normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+	Name                string // name as it appears on command line
+	Shorthand           string // one-letter abbreviated flag
+	Usage               string // help message
+	Value               Value // value as set
+	DefValue            string // default value (as text); for usage message
+	Changed             bool // true if the user set the value (false if left to the default)
+	NoOptDefVal         string // default value (as text); if the flag is on the command line without any options
+	Deprecated          string // If this flag is deprecated, this string describes the new or preferred thing to use
+	Hidden              bool // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string describes the new or preferred thing to use
+	Annotations         map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+	String() string
+	Set(string) error
+	Type() string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+	list := make(sort.StringSlice, len(flags))
+	i := 0
+	for k := range flags {
+		list[i] = string(k)
+		i++
+	}
+	list.Sort()
+	result := make([]*Flag, len(list))
+	for i, name := range list {
+		result[i] = flags[NormalizedName(name)]
+	}
+	return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated and then when anything tries to
+// look up the flag that will also be translated. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl". A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+	f.normalizeNameFunc = n
+	f.sortedFormal = f.sortedFormal[:0]
+	for fname, flag := range f.formal {
+		nname := f.normalizeFlagName(flag.Name)
+		if fname == nname {
+			continue
+		}
+		flag.Name = string(nname)
+		delete(f.formal, fname)
+		f.formal[nname] = flag
+		if _, set := f.actual[fname]; set {
+			delete(f.actual, fname)
+			f.actual[nname] = flag
+		}
+	}
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function which
+// does no translation if none was set previously.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { + if f.normalizeNameFunc != nil { + return f.normalizeNameFunc + } + return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } +} + +func (f *FlagSet) normalizeFlagName(name string) NormalizedName { + n := f.GetNormalizeFunc() + return n(f, name) +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + if len(f.formal) == 0 { + return + } + + var flags []*Flag + if f.SortFlags { + if len(f.formal) != len(f.sortedFormal) { + f.sortedFormal = sortFlags(f.formal) + } + flags = f.sortedFormal + } else { + flags = f.orderedFormal + } + + for _, flag := range flags { + fn(flag) + } +} + +// HasFlags returns a bool to indicate if the FlagSet has any flags definied. +func (f *FlagSet) HasFlags() bool { + return len(f.formal) > 0 +} + +// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags +// definied that are not hidden or deprecated. +func (f *FlagSet) HasAvailableFlags() bool { + for _, flag := range f.formal { + if !flag.Hidden && len(flag.Deprecated) == 0 { + return true + } + } + return false +} + +// VisitAll visits the command-line flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. +// It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. +// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + if len(f.actual) == 0 { + return + } + + var flags []*Flag + if f.SortFlags { + if len(f.actual) != len(f.sortedActual) { + f.sortedActual = sortFlags(f.actual) + } + flags = f.sortedActual + } else { + flags = f.orderedActual + } + + for _, flag := range flags { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. +// It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.lookup(f.normalizeFlagName(name)) +} + +// ShorthandLookup returns the Flag structure of the short handed flag, +// returning nil if none exists. +// It panics, if len(name) > 1. +func (f *FlagSet) ShorthandLookup(name string) *Flag { + if name == "" { + return nil + } + if len(name) > 1 { + msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) + fmt.Fprintf(f.out(), msg) + panic(msg) + } + c := name[0] + return f.shorthands[c] +} + +// lookup returns the Flag structure of the named flag, returning nil if none exists. 
+func (f *FlagSet) lookup(name NormalizedName) *Flag { + return f.formal[name] +} + +// func to return a given type for a given flag name +func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { + flag := f.Lookup(name) + if flag == nil { + err := fmt.Errorf("flag accessed but not defined: %s", name) + return nil, err + } + + if flag.Value.Type() != ftype { + err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type()) + return nil, err + } + + sval := flag.Value.String() + result, err := convFunc(sval) + if err != nil { + return nil, err + } + return result, nil +} + +// ArgsLenAtDash will return the length of f.Args at the moment when a -- was +// found during arg parsing. This allows your program to know which args were +// before the -- and which came after. +func (f *FlagSet) ArgsLenAtDash() int { + return f.argsLenAtDash +} + +// MarkDeprecated indicated that a flag is deprecated in your program. It will +// continue to function but will not show up in help or usage messages. Using +// this flag will also print the given usageMessage. +func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + if usageMessage == "" { + return fmt.Errorf("deprecated message for flag %q must be set", name) + } + flag.Deprecated = usageMessage + return nil +} + +// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your +// program. It will continue to function but will not show up in help or usage +// messages. Using this flag will also print the given usageMessage. +func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + if usageMessage == "" { + return fmt.Errorf("deprecated message for flag %q must be set", name) + } + flag.ShorthandDeprecated = usageMessage + return nil +} + +// MarkHidden sets a flag to 'hidden' in your program. It will continue to +// function but will not show up in help or usage messages. +func (f *FlagSet) MarkHidden(name string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + flag.Hidden = true + return nil +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.Lookup(name) +} + +// ShorthandLookup returns the Flag structure of the short handed flag, +// returning nil if none exists. +func ShorthandLookup(name string) *Flag { + return CommandLine.ShorthandLookup(name) +} + +// Set sets the value of the named flag. 
+func (f *FlagSet) Set(name, value string) error { + normalName := f.normalizeFlagName(name) + flag, ok := f.formal[normalName] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + + err := flag.Value.Set(value) + if err != nil { + var flagName string + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) + } else { + flagName = fmt.Sprintf("--%s", flag.Name) + } + return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) + } + + if !flag.Changed { + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[normalName] = flag + f.orderedActual = append(f.orderedActual, flag) + + flag.Changed = true + } + + if flag.Deprecated != "" { + fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + } + return nil +} + +// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet. +// This is sometimes used by spf13/cobra programs which want to generate additional +// bash completion information. +func (f *FlagSet) SetAnnotation(name, key string, values []string) error { + normalName := f.normalizeFlagName(name) + flag, ok := f.formal[normalName] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + if flag.Annotations == nil { + flag.Annotations = map[string][]string{} + } + flag.Annotations[key] = values + return nil +} + +// Changed returns true if the flag was explicitly set during Parse() and false +// otherwise +func (f *FlagSet) Changed(name string) bool { + flag := f.Lookup(name) + // If a flag doesn't exist, it wasn't changed.... + if flag == nil { + return false + } + return flag.Changed +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +func (f *FlagSet) PrintDefaults() { + usages := f.FlagUsages() + fmt.Fprint(f.out(), usages) +} + +// defaultIsZeroValue returns true if the default value for this flag represents +// a zero value. +func (f *Flag) defaultIsZeroValue() bool { + switch f.Value.(type) { + case boolFlag: + return f.DefValue == "false" + case *durationValue: + // Beginning in Go 1.7, duration zero values are "0s" + return f.DefValue == "0" || f.DefValue == "0s" + case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value: + return f.DefValue == "0" + case *stringValue: + return f.DefValue == "" + case *ipValue, *ipMaskValue, *ipNetValue: + return f.DefValue == "" + case *intSliceValue, *stringSliceValue, *stringArrayValue: + return f.DefValue == "[]" + default: + switch f.Value.String() { + case "false": + return true + case "": + return true + case "": + return true + case "0": + return true + } + return false + } +} + +// UnquoteUsage extracts a back-quoted name from the usage +// string for a flag and returns it and the un-quoted usage. +// Given "a `name` to show" it returns ("name", "a name to show"). +// If there are no back quotes, the name is an educated guess of the +// type of the flag's value, or the empty string if the flag is boolean. +func UnquoteUsage(flag *Flag) (name string, usage string) { + // Look for a back-quoted name, but avoid the strings package. 
+ usage = flag.Usage + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name = usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break // Only one back quote; use type name. + } + } + + name = flag.Value.Type() + switch name { + case "bool": + name = "" + case "float64": + name = "float" + case "int64": + name = "int" + case "uint64": + name = "uint" + case "stringSlice": + name = "strings" + case "intSlice": + name = "ints" + case "uintSlice": + name = "uints" + case "boolSlice": + name = "bools" + } + + return +} + +// Splits the string `s` on whitespace into an initial substring up to +// `i` runes in length and the remainder. Will go `slop` over `i` if +// that encompasses the entire string (which allows the caller to +// avoid short orphan words on the final line). +func wrapN(i, slop int, s string) (string, string) { + if i+slop > len(s) { + return s, "" + } + + w := strings.LastIndexAny(s[:i], " \t") + if w <= 0 { + return s, "" + } + + return s[:w], s[w+1:] +} + +// Wraps the string `s` to a maximum width `w` with leading indent +// `i`. The first line is not indented (this is assumed to be done by +// caller). Pass `w` == 0 to do no wrapping +func wrap(i, w int, s string) string { + if w == 0 { + return s + } + + // space between indent i and end of line width w into which + // we should wrap the text. + wrap := w - i + + var r, l string + + // Not enough space for sensible wrapping. Wrap as a block on + // the next line instead. + if wrap < 24 { + i = 16 + wrap = w - i + r += "\n" + strings.Repeat(" ", i) + } + // If still not enough space then don't even try to wrap. + if wrap < 24 { + return s + } + + // Try to avoid short orphan words on the final line, by + // allowing wrapN to go a bit over if that would fit in the + // remainder of the line. + slop := 5 + wrap = wrap - slop + + // Handle first line, which is indented by the caller (or the + // special case above) + l, s = wrapN(wrap, slop, s) + r = r + l + + // Now wrap the rest + for s != "" { + var t string + + t, s = wrapN(wrap, slop, s) + r = r + "\n" + strings.Repeat(" ", i) + t + } + + return r + +} + +// FlagUsagesWrapped returns a string containing the usage information +// for all flags in the FlagSet. 
Wrapped to `cols` columns (0 for no +// wrapping) +func (f *FlagSet) FlagUsagesWrapped(cols int) string { + buf := new(bytes.Buffer) + + lines := make([]string, 0, len(f.formal)) + + maxlen := 0 + f.VisitAll(func(flag *Flag) { + if flag.Deprecated != "" || flag.Hidden { + return + } + + line := "" + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) + } else { + line = fmt.Sprintf(" --%s", flag.Name) + } + + varname, usage := UnquoteUsage(flag) + if varname != "" { + line += " " + varname + } + if flag.NoOptDefVal != "" { + switch flag.Value.Type() { + case "string": + line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) + case "bool": + if flag.NoOptDefVal != "true" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + case "count": + if flag.NoOptDefVal != "+1" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + default: + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + } + + // This special character will be replaced with spacing once the + // correct alignment is calculated + line += "\x00" + if len(line) > maxlen { + maxlen = len(line) + } + + line += usage + if !flag.defaultIsZeroValue() { + if flag.Value.Type() == "string" { + line += fmt.Sprintf(" (default %q)", flag.DefValue) + } else { + line += fmt.Sprintf(" (default %s)", flag.DefValue) + } + } + + lines = append(lines, line) + }) + + for _, line := range lines { + sidx := strings.Index(line, "\x00") + spacing := strings.Repeat(" ", maxlen-sidx) + // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx + fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) + } + + return buf.String() +} + +// FlagUsages returns a string containing the usage information for all flags in +// the FlagSet +func (f *FlagSet) FlagUsages() string { + return f.FlagUsagesWrapped(0) +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +// By default it prints a simple header and calls PrintDefaults; for details about the +// format of the output and how to control it, see the documentation for PrintDefaults. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. 
+func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + f.VarP(value, name, "", usage) +} + +// VarPF is like VarP, but returns the flag created +func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { + // Remember the default value as a string; it won't change. + flag := &Flag{ + Name: name, + Shorthand: shorthand, + Usage: usage, + Value: value, + DefValue: value.String(), + } + f.AddFlag(flag) + return flag +} + +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { + f.VarPF(value, name, shorthand, usage) +} + +// AddFlag will add the flag to the FlagSet +func (f *FlagSet) AddFlag(flag *Flag) { + normalizedFlagName := f.normalizeFlagName(flag.Name) + + _, alreadyThere := f.formal[normalizedFlagName] + if alreadyThere { + msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) + fmt.Fprintln(f.out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[NormalizedName]*Flag) + } + + flag.Name = string(normalizedFlagName) + f.formal[normalizedFlagName] = flag + f.orderedFormal = append(f.orderedFormal, flag) + + if flag.Shorthand == "" { + return + } + if len(flag.Shorthand) > 1 { + msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) + fmt.Fprintf(f.out(), msg) + panic(msg) + } + if f.shorthands == nil { + f.shorthands = make(map[byte]*Flag) + } + c := flag.Shorthand[0] + used, alreadyThere := f.shorthands[c] + if alreadyThere { + msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) + fmt.Fprintf(f.out(), msg) + panic(msg) + } + f.shorthands[c] = flag +} + +// AddFlagSet adds one FlagSet to another. If a flag is already present in f +// the flag from newSet will be ignored. +func (f *FlagSet) AddFlagSet(newSet *FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(flag *Flag) { + if f.Lookup(flag.Name) == nil { + f.AddFlag(flag) + } + }) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. 
+func Var(value Value, name string, usage string) { + CommandLine.VarP(value, name, "", usage) +} + +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. +func VarP(value Value, name, shorthand, usage string) { + CommandLine.VarP(value, name, shorthand, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + if f.errorHandling != ContinueOnError { + fmt.Fprintln(f.out(), err) + f.usage() + } + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (f *FlagSet) usage() { + if f == CommandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { + a = args + name := s[2:] + if len(name) == 0 || name[0] == '-' || name[0] == '=' { + err = f.failf("bad flag syntax: %s", s) + return + } + + split := strings.SplitN(name, "=", 2) + name = split[0] + flag, exists := f.formal[f.normalizeFlagName(name)] + if !exists { + if name == "help" { // special case for nice help message. + f.usage() + return a, ErrHelp + } + err = f.failf("unknown flag: --%s", name) + return + } + + var value string + if len(split) == 2 { + // '--flag=arg' + value = split[1] + } else if flag.NoOptDefVal != "" { + // '--flag' (arg was optional) + value = flag.NoOptDefVal + } else if len(a) > 0 { + // '--flag arg' + value = a[0] + a = a[1:] + } else { + // '--flag' (arg was required) + err = f.failf("flag needs an argument: %s", s) + return + } + + err = fn(flag, value) + if err != nil { + f.failf(err.Error()) + } + return +} + +func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { + if strings.HasPrefix(shorthands, "test.") { + return + } + + outArgs = args + outShorts = shorthands[1:] + c := shorthands[0] + + flag, exists := f.shorthands[c] + if !exists { + if c == 'h' { // special case for nice help message. + f.usage() + err = ErrHelp + return + } + err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + return + } + + var value string + if len(shorthands) > 2 && shorthands[1] == '=' { + // '-f=arg' + value = shorthands[2:] + outShorts = "" + } else if flag.NoOptDefVal != "" { + // '-f' (arg was optional) + value = flag.NoOptDefVal + } else if len(shorthands) > 1 { + // '-farg' + value = shorthands[1:] + outShorts = "" + } else if len(args) > 0 { + // '-f arg' + value = args[0] + outArgs = args[1:] + } else { + // '-f' (arg was required) + err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + return + } + + if flag.ShorthandDeprecated != "" { + fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + } + + err = fn(flag, value) + if err != nil { + f.failf(err.Error()) + } + return +} + +func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) { + a = args + shorthands := s[1:] + + // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv"). 
+ for len(shorthands) > 0 { + shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) + if err != nil { + return + } + } + + return +} + +func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { + for len(args) > 0 { + s := args[0] + args = args[1:] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + if !f.interspersed { + f.args = append(f.args, s) + f.args = append(f.args, args...) + return nil + } + f.args = append(f.args, s) + continue + } + + if s[1] == '-' { + if len(s) == 2 { // "--" terminates the flags + f.argsLenAtDash = len(f.args) + f.args = append(f.args, args...) + break + } + args, err = f.parseLongArg(s, args, fn) + } else { + args, err = f.parseShortArg(s, args, fn) + } + if err != nil { + return + } + } + return +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + f.parsed = true + + if len(arguments) < 0 { + return nil + } + + f.args = make([]string, 0, len(arguments)) + + set := func(flag *Flag, value string) error { + return f.Set(flag.Name, value) + } + + err := f.parseArgs(arguments, set) + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + fmt.Println(err) + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +type parseFunc func(flag *Flag, value string) error + +// ParseAll parses flag definitions from the argument list, which should not +// include the command name. The arguments for fn are flag and value. Must be +// called after all flags in the FlagSet are defined and before flags are +// accessed by the program. The return value will be ErrHelp if -help was set +// but not defined. +func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error { + f.parsed = true + f.args = make([]string, 0, len(arguments)) + + err := f.parseArgs(arguments, fn) + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// ParseAll parses the command-line flags from os.Args[1:] and called fn for each. +// The arguments for fn are flag and value. Must be called after all flags are +// defined and before flags are accessed by the program. +func ParseAll(fn func(flag *Flag, value string) error) { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.ParseAll(os.Args[1:], fn) +} + +// SetInterspersed sets whether to support interspersed option/non-option arguments. +func SetInterspersed(interspersed bool) { + CommandLine.SetInterspersed(interspersed) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. 
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name, +// error handling property and SortFlags set to true. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + argsLenAtDash: -1, + interspersed: true, + SortFlags: true, + } + return f +} + +// SetInterspersed sets whether to support interspersed option/non-option arguments. +func (f *FlagSet) SetInterspersed(interspersed bool) { + f.interspersed = interspersed +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling + f.argsLenAtDash = -1 +} diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go new file mode 100644 index 00000000..a243f81f --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- float32 Value +type float32Value float32 + +func newFloat32Value(val float32, p *float32) *float32Value { + *p = val + return (*float32Value)(p) +} + +func (f *float32Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = float32Value(v) + return err +} + +func (f *float32Value) Type() string { + return "float32" +} + +func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } + +func float32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseFloat(sval, 32) + if err != nil { + return 0, err + } + return float32(v), nil +} + +// GetFloat32 return the float32 value of a flag with the given name +func (f *FlagSet) GetFloat32(name string) (float32, error) { + val, err := f.getFlagType(name, "float32", float32Conv) + if err != nil { + return 0, err + } + return val.(float32), nil +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func Float32Var(p *float32, name string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. 
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, "", value, usage) + return p +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, shorthand, value, usage) + return p +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func Float32(name string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, "", value, usage) +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. +func Float32P(name, shorthand string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go new file mode 100644 index 00000000..04b5492a --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Type() string { + return "float64" +} + +func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } + +func float64Conv(sval string) (interface{}, error) { + return strconv.ParseFloat(sval, 64) +} + +// GetFloat64 return the float64 value of a flag with the given name +func (f *FlagSet) GetFloat64(name string) (float64, error) { + val, err := f.getFlagType(name, "float64", float64Conv) + if err != nil { + return 0, err + } + return val.(float64), nil +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. 
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, "", value, usage) + return p +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, shorthand, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, "", value, usage) +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. +func Float64P(name, shorthand string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go new file mode 100644 index 00000000..c4f47ebe --- /dev/null +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -0,0 +1,101 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + goflag "flag" + "reflect" + "strings" +) + +// flagValueWrapper implements pflag.Value around a flag.Value. The main +// difference here is the addition of the Type method that returns a string +// name of the type. As this is generally unknown, we approximate that with +// reflection. +type flagValueWrapper struct { + inner goflag.Value + flagType string +} + +// We are just copying the boolFlag interface out of goflag as that is what +// they use to decide if a flag should get "true" when no arg is given. +type goBoolFlag interface { + goflag.Value + IsBoolFlag() bool +} + +func wrapFlagValue(v goflag.Value) Value { + // If the flag.Value happens to also be a pflag.Value, just use it directly. + if pv, ok := v.(Value); ok { + return pv + } + + pv := &flagValueWrapper{ + inner: v, + } + + t := reflect.TypeOf(v) + if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { + t = t.Elem() + } + + pv.flagType = strings.TrimSuffix(t.Name(), "Value") + return pv +} + +func (v *flagValueWrapper) String() string { + return v.inner.String() +} + +func (v *flagValueWrapper) Set(s string) error { + return v.inner.Set(s) +} + +func (v *flagValueWrapper) Type() string { + return v.flagType +} + +// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag +// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei +// with both `-v` and `--v` in flags. If the golang flag was more than a single +// character (ex: `verbose`) it will only be accessible via `--verbose` +func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { + // Remember the default value as a string; it won't change. 
+ flag := &Flag{ + Name: goflag.Name, + Usage: goflag.Usage, + Value: wrapFlagValue(goflag.Value), + // Looks like golang flags don't set DefValue correctly :-( + //DefValue: goflag.DefValue, + DefValue: goflag.Value.String(), + } + // Ex: if the golang flag was -v, allow both -v and --v to work + if len(flag.Name) == 1 { + flag.Shorthand = flag.Name + } + if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { + flag.NoOptDefVal = "true" + } + return flag +} + +// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet +func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { + if f.Lookup(goflag.Name) != nil { + return + } + newflag := PFlagFromGoFlag(goflag) + f.AddFlag(newflag) +} + +// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet +func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(goflag *goflag.Flag) { + f.AddGoFlag(goflag) + }) +} diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go new file mode 100644 index 00000000..1474b89d --- /dev/null +++ b/vendor/github.com/spf13/pflag/int.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Type() string { + return "int" +} + +func (i *intValue) String() string { return strconv.Itoa(int(*i)) } + +func intConv(sval string) (interface{}, error) { + return strconv.Atoi(sval) +} + +// GetInt return the int value of a flag with the given name +func (f *FlagSet) GetInt(name string) (int, error) { + val, err := f.getFlagType(name, "int", intConv) + if err != nil { + return 0, err + } + return val.(int), nil +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.VarP(newIntValue(value, p), name, "", usage) +} + +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { + f.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, "", usage) +} + +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. +func IntVarP(p *int, name, shorthand string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, "", value, usage) + return p +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, shorthand, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return CommandLine.IntP(name, "", value, usage) +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. +func IntP(name, shorthand string, value int, usage string) *int { + return CommandLine.IntP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go new file mode 100644 index 00000000..f1a01d05 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int16.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int16 Value +type int16Value int16 + +func newInt16Value(val int16, p *int16) *int16Value { + *p = val + return (*int16Value)(p) +} + +func (i *int16Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 16) + *i = int16Value(v) + return err +} + +func (i *int16Value) Type() string { + return "int16" +} + +func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int16Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 16) + if err != nil { + return 0, err + } + return int16(v), nil +} + +// GetInt16 returns the int16 value of a flag with the given name +func (f *FlagSet) GetInt16(name string) (int16, error) { + val, err := f.getFlagType(name, "int16", int16Conv) + if err != nil { + return 0, err + } + return val.(int16), nil +} + +// Int16Var defines an int16 flag with specified name, default value, and usage string. +// The argument p points to an int16 variable in which to store the value of the flag. +func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) { + f.VarP(newInt16Value(value, p), name, "", usage) +} + +// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) { + f.VarP(newInt16Value(value, p), name, shorthand, usage) +} + +// Int16Var defines an int16 flag with specified name, default value, and usage string. +// The argument p points to an int16 variable in which to store the value of the flag. +func Int16Var(p *int16, name string, value int16, usage string) { + CommandLine.VarP(newInt16Value(value, p), name, "", usage) +} + +// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. +func Int16VarP(p *int16, name, shorthand string, value int16, usage string) { + CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage) +} + +// Int16 defines an int16 flag with specified name, default value, and usage string. +// The return value is the address of an int16 variable that stores the value of the flag. +func (f *FlagSet) Int16(name string, value int16, usage string) *int16 { + p := new(int16) + f.Int16VarP(p, name, "", value, usage) + return p +} + +// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 { + p := new(int16) + f.Int16VarP(p, name, shorthand, value, usage) + return p +} + +// Int16 defines an int16 flag with specified name, default value, and usage string. 
+// The return value is the address of an int16 variable that stores the value of the flag. +func Int16(name string, value int16, usage string) *int16 { + return CommandLine.Int16P(name, "", value, usage) +} + +// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. +func Int16P(name, shorthand string, value int16, usage string) *int16 { + return CommandLine.Int16P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go new file mode 100644 index 00000000..9b95944f --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int32 Value +type int32Value int32 + +func newInt32Value(val int32, p *int32) *int32Value { + *p = val + return (*int32Value)(p) +} + +func (i *int32Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 32) + *i = int32Value(v) + return err +} + +func (i *int32Value) Type() string { + return "int32" +} + +func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 32) + if err != nil { + return 0, err + } + return int32(v), nil +} + +// GetInt32 return the int32 value of a flag with the given name +func (f *FlagSet) GetInt32(name string) (int32, error) { + val, err := f.getFlagType(name, "int32", int32Conv) + if err != nil { + return 0, err + } + return val.(int32), nil +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func Int32Var(p *int32, name string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, "", value, usage) + return p +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, shorthand, value, usage) + return p +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. 
+func Int32(name string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, "", value, usage) +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func Int32P(name, shorthand string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go new file mode 100644 index 00000000..0026d781 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Type() string { + return "int64" +} + +func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int64Conv(sval string) (interface{}, error) { + return strconv.ParseInt(sval, 0, 64) +} + +// GetInt64 return the int64 value of a flag with the given name +func (f *FlagSet) GetInt64(name string) (int64, error) { + val, err := f.getFlagType(name, "int64", int64Conv) + if err != nil { + return 0, err + } + return val.(int64), nil +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, "", value, usage) + return p +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, shorthand, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, "", value, usage) +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. 
+func Int64P(name, shorthand string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go new file mode 100644 index 00000000..4da92228 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int8.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int8 Value +type int8Value int8 + +func newInt8Value(val int8, p *int8) *int8Value { + *p = val + return (*int8Value)(p) +} + +func (i *int8Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 8) + *i = int8Value(v) + return err +} + +func (i *int8Value) Type() string { + return "int8" +} + +func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int8Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 8) + if err != nil { + return 0, err + } + return int8(v), nil +} + +// GetInt8 return the int8 value of a flag with the given name +func (f *FlagSet) GetInt8(name string) (int8, error) { + val, err := f.getFlagType(name, "int8", int8Conv) + if err != nil { + return 0, err + } + return val.(int8), nil +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func Int8Var(p *int8, name string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, "", value, usage) + return p +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, shorthand, value, usage) + return p +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func Int8(name string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, "", value, usage) +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
+func Int8P(name, shorthand string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go new file mode 100644 index 00000000..1e7c9edd --- /dev/null +++ b/vendor/github.com/spf13/pflag/int_slice.go @@ -0,0 +1,128 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- intSlice Value +type intSliceValue struct { + value *[]int + changed bool +} + +func newIntSliceValue(val []int, p *[]int) *intSliceValue { + isv := new(intSliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *intSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *intSliceValue) Type() string { + return "intSlice" +} + +func (s *intSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func intSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int{}, nil + } + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetIntSlice return the []int value of a flag with the given name +func (f *FlagSet) GetIntSlice(name string) ([]int, error) { + val, err := f.getFlagType(name, "intSlice", intSliceConv) + if err != nil { + return []int{}, err + } + return val.([]int), nil +} + +// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. +// The argument p points to a []int variable in which to store the value of the flag. +func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSliceVar defines a int[] flag with specified name, default value, and usage string. +// The argument p points to a int[] variable in which to store the value of the flag. +func IntSliceVar(p *[]int, name string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, "", value, usage) + return &p +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func IntSlice(name string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, "", value, usage) +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. +func IntSliceP(name, shorthand string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go new file mode 100644 index 00000000..3d414ba6 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip.go @@ -0,0 +1,94 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +// -- net.IP value +type ipValue net.IP + +func newIPValue(val net.IP, p *net.IP) *ipValue { + *p = val + return (*ipValue)(p) +} + +func (i *ipValue) String() string { return net.IP(*i).String() } +func (i *ipValue) Set(s string) error { + ip := net.ParseIP(strings.TrimSpace(s)) + if ip == nil { + return fmt.Errorf("failed to parse IP: %q", s) + } + *i = ipValue(ip) + return nil +} + +func (i *ipValue) Type() string { + return "ip" +} + +func ipConv(sval string) (interface{}, error) { + ip := net.ParseIP(sval) + if ip != nil { + return ip, nil + } + return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) +} + +// GetIP return the net.IP value of a flag with the given name +func (f *FlagSet) GetIP(name string) (net.IP, error) { + val, err := f.getFlagType(name, "ip", ipConv) + if err != nil { + return nil, err + } + return val.(net.IP), nil +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func IPVar(p *net.IP, name string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, "", value, usage) + return p +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, shorthand, value, usage) + return p +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func IP(name string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, "", value, usage) +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. +func IPP(name, shorthand string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go new file mode 100644 index 00000000..7dd196fe --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip_slice.go @@ -0,0 +1,148 @@ +package pflag + +import ( + "fmt" + "io" + "net" + "strings" +) + +// -- ipSlice Value +type ipSliceValue struct { + value *[]net.IP + changed bool +} + +func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { + ipsv := new(ipSliceValue) + ipsv.value = p + *ipsv.value = val + return ipsv +} + +// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. +// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. +func (s *ipSliceValue) Set(val string) error { + + // remove all quote characters + rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") + + // read flag arguments with CSV parser + ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) + if err != nil && err != io.EOF { + return err + } + + // parse ip values into slice + out := make([]net.IP, 0, len(ipStrSlice)) + for _, ipStr := range ipStrSlice { + ip := net.ParseIP(strings.TrimSpace(ipStr)) + if ip == nil { + return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) + } + out = append(out, ip) + } + + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + + s.changed = true + + return nil +} + +// Type returns a string that uniquely represents this flag's type. +func (s *ipSliceValue) Type() string { + return "ipSlice" +} + +// String defines a "native" format for this net.IP slice flag value. +func (s *ipSliceValue) String() string { + + ipStrSlice := make([]string, len(*s.value)) + for i, ip := range *s.value { + ipStrSlice[i] = ip.String() + } + + out, _ := writeAsCSV(ipStrSlice) + + return "[" + out + "]" +} + +func ipSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Emtpy string would cause a slice with one (empty) entry + if len(val) == 0 { + return []net.IP{}, nil + } + ss := strings.Split(val, ",") + out := make([]net.IP, len(ss)) + for i, sval := range ss { + ip := net.ParseIP(strings.TrimSpace(sval)) + if ip == nil { + return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) + } + out[i] = ip + } + return out, nil +} + +// GetIPSlice returns the []net.IP value of a flag with the given name +func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) { + val, err := f.getFlagType(name, "ipSlice", ipSliceConv) + if err != nil { + return []net.IP{}, err + } + return val.([]net.IP), nil +} + +// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string. +// The argument p points to a []net.IP variable in which to store the value of the flag. 
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { + f.VarP(newIPSliceValue(value, p), name, "", usage) +} + +// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { + f.VarP(newIPSliceValue(value, p), name, shorthand, usage) +} + +// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string. +// The argument p points to a []net.IP variable in which to store the value of the flag. +func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { + CommandLine.VarP(newIPSliceValue(value, p), name, "", usage) +} + +// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { + CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage) +} + +// IPSlice defines a []net.IP flag with specified name, default value, and usage string. +// The return value is the address of a []net.IP variable that stores the value of that flag. +func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP { + p := []net.IP{} + f.IPSliceVarP(&p, name, "", value, usage) + return &p +} + +// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { + p := []net.IP{} + f.IPSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IPSlice defines a []net.IP flag with specified name, default value, and usage string. +// The return value is the address of a []net.IP variable that stores the value of the flag. +func IPSlice(name string, value []net.IP, usage string) *[]net.IP { + return CommandLine.IPSliceP(name, "", value, usage) +} + +// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. +func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { + return CommandLine.IPSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go new file mode 100644 index 00000000..5bd44bd2 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipmask.go @@ -0,0 +1,122 @@ +package pflag + +import ( + "fmt" + "net" + "strconv" +) + +// -- net.IPMask value +type ipMaskValue net.IPMask + +func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { + *p = val + return (*ipMaskValue)(p) +} + +func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } +func (i *ipMaskValue) Set(s string) error { + ip := ParseIPv4Mask(s) + if ip == nil { + return fmt.Errorf("failed to parse IP mask: %q", s) + } + *i = ipMaskValue(ip) + return nil +} + +func (i *ipMaskValue) Type() string { + return "ipMask" +} + +// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). +// This function should really belong to the net package. 
+func ParseIPv4Mask(s string) net.IPMask { + mask := net.ParseIP(s) + if mask == nil { + if len(s) != 8 { + return nil + } + // net.IPMask.String() actually outputs things like ffffff00 + // so write a horrible parser for that as well :-( + m := []int{} + for i := 0; i < 4; i++ { + b := "0x" + s[2*i:2*i+2] + d, err := strconv.ParseInt(b, 0, 0) + if err != nil { + return nil + } + m = append(m, int(d)) + } + s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) + mask = net.ParseIP(s) + if mask == nil { + return nil + } + } + return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) +} + +func parseIPv4Mask(sval string) (interface{}, error) { + mask := ParseIPv4Mask(sval) + if mask == nil { + return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) + } + return mask, nil +} + +// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name +func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { + val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) + if err != nil { + return nil, err + } + return val.(net.IPMask), nil +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. +func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. +func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, "", value, usage) + return p +} + +// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, shorthand, value, usage) + return p +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func IPMask(name string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, "", value, usage) +} + +// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. 
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go new file mode 100644 index 00000000..e2c1b8bc --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet.go @@ -0,0 +1,98 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +// IPNet adapts net.IPNet for use as a flag. +type ipNetValue net.IPNet + +func (ipnet ipNetValue) String() string { + n := net.IPNet(ipnet) + return n.String() +} + +func (ipnet *ipNetValue) Set(value string) error { + _, n, err := net.ParseCIDR(strings.TrimSpace(value)) + if err != nil { + return err + } + *ipnet = ipNetValue(*n) + return nil +} + +func (*ipNetValue) Type() string { + return "ipNet" +} + +func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { + *p = val + return (*ipNetValue)(p) +} + +func ipNetConv(sval string) (interface{}, error) { + _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) + if err == nil { + return *n, nil + } + return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) +} + +// GetIPNet return the net.IPNet value of a flag with the given name +func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { + val, err := f.getFlagType(name, "ipNet", ipNetConv) + if err != nil { + return net.IPNet{}, err + } + return val.(net.IPNet), nil +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. +func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, "", value, usage) + return p +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, shorthand, value, usage) + return p +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. 
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, "", value, usage) +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. +func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go new file mode 100644 index 00000000..04e0a26f --- /dev/null +++ b/vendor/github.com/spf13/pflag/string.go @@ -0,0 +1,80 @@ +package pflag + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} +func (s *stringValue) Type() string { + return "string" +} + +func (s *stringValue) String() string { return string(*s) } + +func stringConv(sval string) (interface{}, error) { + return sval, nil +} + +// GetString return the string value of a flag with the given name +func (f *FlagSet) GetString(name string) (string, error) { + val, err := f.getFlagType(name, "string", stringConv) + if err != nil { + return "", err + } + return val.(string), nil +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.VarP(newStringValue(value, p), name, "", usage) +} + +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { + f.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, "", usage) +} + +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. +func StringVarP(p *string, name, shorthand string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, "", value, usage) + return p +} + +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, shorthand, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return CommandLine.StringP(name, "", value, usage) +} + +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. 
+func StringP(name, shorthand string, value string, usage string) *string { + return CommandLine.StringP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go new file mode 100644 index 00000000..fa7bc601 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -0,0 +1,103 @@ +package pflag + +// -- stringArray Value +type stringArrayValue struct { + value *[]string + changed bool +} + +func newStringArrayValue(val []string, p *[]string) *stringArrayValue { + ssv := new(stringArrayValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func (s *stringArrayValue) Set(val string) error { + if !s.changed { + *s.value = []string{val} + s.changed = true + } else { + *s.value = append(*s.value, val) + } + return nil +} + +func (s *stringArrayValue) Type() string { + return "stringArray" +} + +func (s *stringArrayValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} + +func stringArrayConv(sval string) (interface{}, error) { + sval = sval[1 : len(sval)-1] + // An empty string would cause a array with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + return readAsCSV(sval) +} + +// GetStringArray return the []string value of a flag with the given name +func (f *FlagSet) GetStringArray(name string) ([]string, error) { + val, err := f.getFlagType(name, "stringArray", stringArrayConv) + if err != nil { + return []string{}, err + } + return val.([]string), nil +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. +func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. +func StringArrayVar(p *[]string, name string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. +func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { + p := []string{} + f.StringArrayVarP(&p, name, "", value, usage) + return &p +} + +// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { + p := []string{} + f.StringArrayVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. +func StringArray(name string, value []string, usage string) *[]string { + return CommandLine.StringArrayP(name, "", value, usage) +} + +// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. +func StringArrayP(name, shorthand string, value []string, usage string) *[]string { + return CommandLine.StringArrayP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go new file mode 100644 index 00000000..0cd3ccc0 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "encoding/csv" + "strings" +) + +// -- stringSlice Value +type stringSliceValue struct { + value *[]string + changed bool +} + +func newStringSliceValue(val []string, p *[]string) *stringSliceValue { + ssv := new(stringSliceValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +func writeAsCSV(vals []string) (string, error) { + b := &bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(vals) + if err != nil { + return "", err + } + w.Flush() + return strings.TrimSuffix(b.String(), "\n"), nil +} + +func (s *stringSliceValue) Set(val string) error { + v, err := readAsCSV(val) + if err != nil { + return err + } + if !s.changed { + *s.value = v + } else { + *s.value = append(*s.value, v...) + } + s.changed = true + return nil +} + +func (s *stringSliceValue) Type() string { + return "stringSlice" +} + +func (s *stringSliceValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} + +func stringSliceConv(sval string) (interface{}, error) { + sval = sval[1 : len(sval)-1] + // An empty string would cause a slice with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + return readAsCSV(sval) +} + +// GetStringSlice return the []string value of a flag with the given name +func (f *FlagSet) GetStringSlice(name string) ([]string, error) { + val, err := f.getFlagType(name, "stringSlice", stringSliceConv) + if err != nil { + return []string{}, err + } + return val.([]string), nil +} + +// StringSliceVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. +// For example: +// --ss="v1,v2" -ss="v3" +// will result in +// []string{"v1", "v2", "v3"} +func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { + f.VarP(newStringSliceValue(value, p), name, "", usage) +} + +// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { + f.VarP(newStringSliceValue(value, p), name, shorthand, usage) +} + +// StringSliceVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. +// For example: +// --ss="v1,v2" -ss="v3" +// will result in +// []string{"v1", "v2", "v3"} +func StringSliceVar(p *[]string, name string, value []string, usage string) { + CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) +} + +// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. +func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { + CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) +} + +// StringSlice defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. +// For example: +// --ss="v1,v2" -ss="v3" +// will result in +// []string{"v1", "v2", "v3"} +func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { + p := []string{} + f.StringSliceVarP(&p, name, "", value, usage) + return &p +} + +// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { + p := []string{} + f.StringSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringSlice defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. +// For example: +// --ss="v1,v2" -ss="v3" +// will result in +// []string{"v1", "v2", "v3"} +func StringSlice(name string, value []string, usage string) *[]string { + return CommandLine.StringSliceP(name, "", value, usage) +} + +// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. 
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string { + return CommandLine.StringSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go new file mode 100644 index 00000000..dcbc2b75 --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Type() string { + return "uint" +} + +func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uintConv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 0) + if err != nil { + return 0, err + } + return uint(v), nil +} + +// GetUint return the uint value of a flag with the given name +func (f *FlagSet) GetUint(name string) (uint, error) { + val, err := f.getFlagType(name, "uint", uintConv) + if err != nil { + return 0, err + } + return val.(uint), nil +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, "", usage) +} + +// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, "", usage) +} + +// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. +func UintVarP(p *uint, name, shorthand string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, "", value, usage) + return p +} + +// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, shorthand, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { + return CommandLine.UintP(name, "", value, usage) +} + +// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. 
+func UintP(name, shorthand string, value uint, usage string) *uint { + return CommandLine.UintP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go new file mode 100644 index 00000000..7e9914ed --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint16.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint16 value +type uint16Value uint16 + +func newUint16Value(val uint16, p *uint16) *uint16Value { + *p = val + return (*uint16Value)(p) +} + +func (i *uint16Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 16) + *i = uint16Value(v) + return err +} + +func (i *uint16Value) Type() string { + return "uint16" +} + +func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint16Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 16) + if err != nil { + return 0, err + } + return uint16(v), nil +} + +// GetUint16 return the uint16 value of a flag with the given name +func (f *FlagSet) GetUint16(name string) (uint16, error) { + val, err := f.getFlagType(name, "uint16", uint16Conv) + if err != nil { + return 0, err + } + return val.(uint16), nil +} + +// Uint16Var defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { + f.VarP(newUint16Value(value, p), name, "", usage) +} + +// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { + f.VarP(newUint16Value(value, p), name, shorthand, usage) +} + +// Uint16Var defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func Uint16Var(p *uint16, name string, value uint16, usage string) { + CommandLine.VarP(newUint16Value(value, p), name, "", usage) +} + +// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { + CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) +} + +// Uint16 defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { + p := new(uint16) + f.Uint16VarP(p, name, "", value, usage) + return p +} + +// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { + p := new(uint16) + f.Uint16VarP(p, name, shorthand, value, usage) + return p +} + +// Uint16 defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint16(name string, value uint16, usage string) *uint16 { + return CommandLine.Uint16P(name, "", value, usage) +} + +// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. 
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { + return CommandLine.Uint16P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go new file mode 100644 index 00000000..d8024539 --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint32 value +type uint32Value uint32 + +func newUint32Value(val uint32, p *uint32) *uint32Value { + *p = val + return (*uint32Value)(p) +} + +func (i *uint32Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 32) + *i = uint32Value(v) + return err +} + +func (i *uint32Value) Type() string { + return "uint32" +} + +func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 32) + if err != nil { + return 0, err + } + return uint32(v), nil +} + +// GetUint32 return the uint32 value of a flag with the given name +func (f *FlagSet) GetUint32(name string) (uint32, error) { + val, err := f.getFlagType(name, "uint32", uint32Conv) + if err != nil { + return 0, err + } + return val.(uint32), nil +} + +// Uint32Var defines a uint32 flag with specified name, default value, and usage string. +// The argument p points to a uint32 variable in which to store the value of the flag. +func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { + f.VarP(newUint32Value(value, p), name, "", usage) +} + +// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { + f.VarP(newUint32Value(value, p), name, shorthand, usage) +} + +// Uint32Var defines a uint32 flag with specified name, default value, and usage string. +// The argument p points to a uint32 variable in which to store the value of the flag. +func Uint32Var(p *uint32, name string, value uint32, usage string) { + CommandLine.VarP(newUint32Value(value, p), name, "", usage) +} + +// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { + CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) +} + +// Uint32 defines a uint32 flag with specified name, default value, and usage string. +// The return value is the address of a uint32 variable that stores the value of the flag. +func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { + p := new(uint32) + f.Uint32VarP(p, name, "", value, usage) + return p +} + +// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + p := new(uint32) + f.Uint32VarP(p, name, shorthand, value, usage) + return p +} + +// Uint32 defines a uint32 flag with specified name, default value, and usage string. +// The return value is the address of a uint32 variable that stores the value of the flag. +func Uint32(name string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, "", value, usage) +} + +// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. 
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go new file mode 100644 index 00000000..f62240f2 --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint64.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Type() string { + return "uint64" +} + +func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint64Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 64) + if err != nil { + return 0, err + } + return uint64(v), nil +} + +// GetUint64 return the uint64 value of a flag with the given name +func (f *FlagSet) GetUint64(name string) (uint64, error) { + val, err := f.getFlagType(name, "uint64", uint64Conv) + if err != nil { + return 0, err + } + return val.(uint64), nil +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, "", usage) +} + +// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, "", usage) +} + +// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, "", value, usage) + return p +} + +// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, shorthand, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, "", value, usage) +} + +// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. 
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go new file mode 100644 index 00000000..bb0e83c1 --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint8.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint8 Value +type uint8Value uint8 + +func newUint8Value(val uint8, p *uint8) *uint8Value { + *p = val + return (*uint8Value)(p) +} + +func (i *uint8Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 8) + *i = uint8Value(v) + return err +} + +func (i *uint8Value) Type() string { + return "uint8" +} + +func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint8Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 8) + if err != nil { + return 0, err + } + return uint8(v), nil +} + +// GetUint8 return the uint8 value of a flag with the given name +func (f *FlagSet) GetUint8(name string) (uint8, error) { + val, err := f.getFlagType(name, "uint8", uint8Conv) + if err != nil { + return 0, err + } + return val.(uint8), nil +} + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, "", usage) +} + +// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func Uint8Var(p *uint8, name string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, "", usage) +} + +// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, "", value, usage) + return p +} + +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, shorthand, value, usage) + return p +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func Uint8(name string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, "", value, usage) +} + +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. 
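[editor's note: a small sketch of the unsigned-integer flag helpers above (Uint, Uint16, and their Get counterparts); the flag names "workers" and "port" are placeholders chosen for illustration.]

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

	// "workers" and "port" are made-up flag names.
	workers := fs.UintP("workers", "w", 4, "number of worker goroutines")
	fs.Uint16("port", 8080, "listen port")

	_ = fs.Parse([]string{"-w", "16", "--port=9000"})

	// Typed lookup through the Get helpers defined above.
	port, _ := fs.GetUint16("port")
	fmt.Println(*workers, port) // 16 9000
}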
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go new file mode 100644 index 00000000..edd94c60 --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint_slice.go @@ -0,0 +1,126 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- uintSlice Value +type uintSliceValue struct { + value *[]uint + changed bool +} + +func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue { + uisv := new(uintSliceValue) + uisv.value = p + *uisv.value = val + return uisv +} + +func (s *uintSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]uint, len(ss)) + for i, d := range ss { + u, err := strconv.ParseUint(d, 10, 0) + if err != nil { + return err + } + out[i] = uint(u) + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *uintSliceValue) Type() string { + return "uintSlice" +} + +func (s *uintSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func uintSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []uint{}, nil + } + ss := strings.Split(val, ",") + out := make([]uint, len(ss)) + for i, d := range ss { + u, err := strconv.ParseUint(d, 10, 0) + if err != nil { + return nil, err + } + out[i] = uint(u) + } + return out, nil +} + +// GetUintSlice returns the []uint value of a flag with the given name. +func (f *FlagSet) GetUintSlice(name string) ([]uint, error) { + val, err := f.getFlagType(name, "uintSlice", uintSliceConv) + if err != nil { + return []uint{}, err + } + return val.([]uint), nil +} + +// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string. +// The argument p points to a []uint variable in which to store the value of the flag. +func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) { + f.VarP(newUintSliceValue(value, p), name, "", usage) +} + +// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { + f.VarP(newUintSliceValue(value, p), name, shorthand, usage) +} + +// UintSliceVar defines a uint[] flag with specified name, default value, and usage string. +// The argument p points to a uint[] variable in which to store the value of the flag. +func UintSliceVar(p *[]uint, name string, value []uint, usage string) { + CommandLine.VarP(newUintSliceValue(value, p), name, "", usage) +} + +// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash. +func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { + CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage) +} + +// UintSlice defines a []uint flag with specified name, default value, and usage string. +// The return value is the address of a []uint variable that stores the value of the flag. 
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint { + p := []uint{} + f.UintSliceVarP(&p, name, "", value, usage) + return &p +} + +// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { + p := []uint{} + f.UintSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// UintSlice defines a []uint flag with specified name, default value, and usage string. +// The return value is the address of a []uint variable that stores the value of the flag. +func UintSlice(name string, value []uint, usage string) *[]uint { + return CommandLine.UintSliceP(name, "", value, usage) +} + +// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. +func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { + return CommandLine.UintSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/viper/LICENSE b/vendor/github.com/spf13/viper/LICENSE new file mode 100644 index 00000000..4527efb9 --- /dev/null +++ b/vendor/github.com/spf13/viper/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go new file mode 100644 index 00000000..dd32f4e1 --- /dev/null +++ b/vendor/github.com/spf13/viper/flags.go @@ -0,0 +1,57 @@ +package viper + +import "github.com/spf13/pflag" + +// FlagValueSet is an interface that users can implement +// to bind a set of flags to viper. +type FlagValueSet interface { + VisitAll(fn func(FlagValue)) +} + +// FlagValue is an interface that users can implement +// to bind different flags to viper. +type FlagValue interface { + HasChanged() bool + Name() string + ValueString() string + ValueType() string +} + +// pflagValueSet is a wrapper around *pflag.ValueSet +// that implements FlagValueSet. +type pflagValueSet struct { + flags *pflag.FlagSet +} + +// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet. +func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) { + p.flags.VisitAll(func(flag *pflag.Flag) { + fn(pflagValue{flag}) + }) +} + +// pflagValue is a wrapper aroung *pflag.flag +// that implements FlagValue +type pflagValue struct { + flag *pflag.Flag +} + +// HasChanges returns whether the flag has changes or not. 
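[editor's note: the FlagValue/FlagValueSet interfaces above exist so flag libraries other than pflag can be bound to viper. Below is a rough, hypothetical adapter for the standard library's flag package; the goFlagValue/goFlagSet names are invented here, and HasChanged is only approximated by comparing against the default value.]

package main

import (
	"flag"
	"fmt"

	"github.com/spf13/viper"
)

// goFlagValue is an illustrative wrapper, not part of viper.
type goFlagValue struct{ f *flag.Flag }

func (g goFlagValue) HasChanged() bool    { return g.f.Value.String() != g.f.DefValue } // approximation
func (g goFlagValue) Name() string        { return g.f.Name }
func (g goFlagValue) ValueString() string { return g.f.Value.String() }
func (g goFlagValue) ValueType() string   { return "string" } // stdlib flags expose no type info

// goFlagSet adapts *flag.FlagSet to viper's FlagValueSet.
type goFlagSet struct{ fs *flag.FlagSet }

func (g goFlagSet) VisitAll(fn func(viper.FlagValue)) {
	g.fs.VisitAll(func(f *flag.Flag) { fn(goFlagValue{f}) })
}

func main() {
	flag.String("region", "us-east-1", "deployment region") // made-up flag
	flag.Parse()

	// BindFlagValues (defined later in viper.go) accepts any FlagValueSet.
	_ = viper.BindFlagValues(goFlagSet{flag.CommandLine})
	fmt.Println(viper.GetString("region"))
}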
+func (p pflagValue) HasChanged() bool { + return p.flag.Changed +} + +// Name returns the name of the flag. +func (p pflagValue) Name() string { + return p.flag.Name +} + +// ValueString returns the value of the flag as a string. +func (p pflagValue) ValueString() string { + return p.flag.Value.String() +} + +// ValueType returns the type of the flag as a string. +func (p pflagValue) ValueType() string { + return p.flag.Value.Type() +} diff --git a/vendor/github.com/spf13/viper/remote/remote.go b/vendor/github.com/spf13/viper/remote/remote.go new file mode 100644 index 00000000..810d0702 --- /dev/null +++ b/vendor/github.com/spf13/viper/remote/remote.go @@ -0,0 +1,105 @@ +// Copyright © 2015 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +// Package remote integrates the remote features of Viper. +package remote + +import ( + "bytes" + "io" + "os" + + "github.com/spf13/viper" + crypt "github.com/xordataexchange/crypt/config" +) + +type remoteConfigProvider struct{} + +func (rc remoteConfigProvider) Get(rp viper.RemoteProvider) (io.Reader, error) { + cm, err := getConfigManager(rp) + if err != nil { + return nil, err + } + b, err := cm.Get(rp.Path()) + if err != nil { + return nil, err + } + return bytes.NewReader(b), nil +} + +func (rc remoteConfigProvider) Watch(rp viper.RemoteProvider) (io.Reader, error) { + cm, err := getConfigManager(rp) + if err != nil { + return nil, err + } + resp, err := cm.Get(rp.Path()) + if err != nil { + return nil, err + } + + return bytes.NewReader(resp), nil +} + +func (rc remoteConfigProvider) WatchChannel(rp viper.RemoteProvider) (<-chan *viper.RemoteResponse, chan bool) { + cm, err := getConfigManager(rp) + if err != nil { + return nil, nil + } + quit := make(chan bool) + quitwc := make(chan bool) + viperResponsCh := make(chan *viper.RemoteResponse) + cryptoResponseCh := cm.Watch(rp.Path(), quit) + // need this function to convert the Channel response form crypt.Response to viper.Response + go func(cr <-chan *crypt.Response, vr chan<- *viper.RemoteResponse, quitwc <-chan bool, quit chan<- bool) { + for { + select { + case <-quitwc: + quit <- true + return + case resp := <-cr: + vr <- &viper.RemoteResponse{ + Error: resp.Error, + Value: resp.Value, + } + + } + + } + }(cryptoResponseCh, viperResponsCh, quitwc, quit) + + return viperResponsCh, quitwc +} + +func getConfigManager(rp viper.RemoteProvider) (crypt.ConfigManager, error) { + var cm crypt.ConfigManager + var err error + + if rp.SecretKeyring() != "" { + kr, err := os.Open(rp.SecretKeyring()) + defer kr.Close() + if err != nil { + return nil, err + } + if rp.Provider() == "etcd" { + cm, err = crypt.NewEtcdConfigManager([]string{rp.Endpoint()}, kr) + } else { + cm, err = crypt.NewConsulConfigManager([]string{rp.Endpoint()}, kr) + } + } else { + if rp.Provider() == "etcd" { + cm, err = crypt.NewStandardEtcdConfigManager([]string{rp.Endpoint()}) + } else { + cm, err = crypt.NewStandardConsulConfigManager([]string{rp.Endpoint()}) + } + } + if err != nil { + return nil, err + } + return cm, nil +} + +func init() { + viper.RemoteConfig = &remoteConfigProvider{} +} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go new file mode 100644 index 00000000..952cad44 --- /dev/null +++ b/vendor/github.com/spf13/viper/util.go @@ -0,0 +1,221 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. 
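[editor's note: remote.go above registers the crypt-backed etcd/consul provider through an init() side effect, so applications opt in with a blank import. The sketch below is hypothetical: the endpoint and path are placeholders, and SetConfigType/ReadRemoteConfig are part of viper.go but not shown in this hunk.]

package main

import (
	"log"

	"github.com/spf13/viper"
	// Blank import runs remote.go's init(), which sets viper.RemoteConfig.
	_ "github.com/spf13/viper/remote"
)

func main() {
	// Endpoint and path are placeholders for illustration.
	if err := viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/configs/myapp.json"); err != nil {
		log.Fatal(err)
	}
	viper.SetConfigType("json") // defined elsewhere in viper.go
	if err := viper.ReadRemoteConfig(); err != nil { // likewise outside this hunk
		log.Fatal(err)
	}
}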
+ +// Viper is a application configuration system. +// It believes that applications can be configured a variety of ways +// via flags, ENVIRONMENT variables, configuration files retrieved +// from the file system, or a remote key/value store. + +package viper + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "unicode" + + "github.com/spf13/afero" + "github.com/spf13/cast" + jww "github.com/spf13/jwalterweatherman" +) + +// ConfigParseError denotes failing to parse configuration file. +type ConfigParseError struct { + err error +} + +// Error returns the formatted configuration error. +func (pe ConfigParseError) Error() string { + return fmt.Sprintf("While parsing config: %s", pe.err.Error()) +} + +// toCaseInsensitiveValue checks if the value is a map; +// if so, create a copy and lower-case the keys recursively. +func toCaseInsensitiveValue(value interface{}) interface{} { + switch v := value.(type) { + case map[interface{}]interface{}: + value = copyAndInsensitiviseMap(cast.ToStringMap(v)) + case map[string]interface{}: + value = copyAndInsensitiviseMap(v) + } + + return value +} + +// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of +// any map it makes case insensitive. +func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { + nm := make(map[string]interface{}) + + for key, val := range m { + lkey := strings.ToLower(key) + switch v := val.(type) { + case map[interface{}]interface{}: + nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) + case map[string]interface{}: + nm[lkey] = copyAndInsensitiviseMap(v) + default: + nm[lkey] = v + } + } + + return nm +} + +func insensitiviseMap(m map[string]interface{}) { + for key, val := range m { + switch val.(type) { + case map[interface{}]interface{}: + // nested map: cast and recursively insensitivise + val = cast.ToStringMap(val) + insensitiviseMap(val.(map[string]interface{})) + case map[string]interface{}: + // nested map: recursively insensitivise + insensitiviseMap(val.(map[string]interface{})) + } + + lower := strings.ToLower(key) + if key != lower { + // remove old key (not lower-cased) + delete(m, key) + } + // update map + m[lower] = val + } +} + +func absPathify(inPath string) string { + jww.INFO.Println("Trying to resolve absolute path to", inPath) + + if strings.HasPrefix(inPath, "$HOME") { + inPath = userHomeDir() + inPath[5:] + } + + if strings.HasPrefix(inPath, "$") { + end := strings.Index(inPath, string(os.PathSeparator)) + inPath = os.Getenv(inPath[1:end]) + inPath[end:] + } + + if filepath.IsAbs(inPath) { + return filepath.Clean(inPath) + } + + p, err := filepath.Abs(inPath) + if err == nil { + return filepath.Clean(p) + } + + jww.ERROR.Println("Couldn't discover absolute path") + jww.ERROR.Println(err) + return "" +} + +// Check if File / Directory Exists +func exists(fs afero.Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +func userHomeDir() string { + if runtime.GOOS == "windows" { + home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + if home == "" { + home = os.Getenv("USERPROFILE") + } + return home + } + return os.Getenv("HOME") +} + +func safeMul(a, b uint) uint { + c := a * b + if a > 1 && b > 1 && c/b != a { + return 0 + } + return c +} + +// parseSizeInBytes converts strings 
like 1GB or 12 mb into an unsigned integer number of bytes +func parseSizeInBytes(sizeStr string) uint { + sizeStr = strings.TrimSpace(sizeStr) + lastChar := len(sizeStr) - 1 + multiplier := uint(1) + + if lastChar > 0 { + if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' { + if lastChar > 1 { + switch unicode.ToLower(rune(sizeStr[lastChar-1])) { + case 'k': + multiplier = 1 << 10 + sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) + case 'm': + multiplier = 1 << 20 + sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) + case 'g': + multiplier = 1 << 30 + sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) + default: + multiplier = 1 + sizeStr = strings.TrimSpace(sizeStr[:lastChar]) + } + } + } + } + + size := cast.ToInt(sizeStr) + if size < 0 { + size = 0 + } + + return safeMul(uint(size), multiplier) +} + +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. +// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! +func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]interface{}) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]interface{}) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]interface{}) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go new file mode 100644 index 00000000..ad8a0372 --- /dev/null +++ b/vendor/github.com/spf13/viper/viper.go @@ -0,0 +1,1775 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +// Viper is a application configuration system. +// It believes that applications can be configured a variety of ways +// via flags, ENVIRONMENT variables, configuration files retrieved +// from the file system, or a remote key/value store. + +// Each item takes precedence over the item below it: + +// overrides +// flag +// env +// config +// key/value store +// default + +package viper + +import ( + "bytes" + "encoding/csv" + "encoding/json" + "fmt" + "io" + "log" + "os" + "path/filepath" + "reflect" + "strings" + "time" + + yaml "gopkg.in/yaml.v2" + + "github.com/fsnotify/fsnotify" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/printer" + "github.com/magiconair/properties" + "github.com/mitchellh/mapstructure" + toml "github.com/pelletier/go-toml" + "github.com/spf13/afero" + "github.com/spf13/cast" + jww "github.com/spf13/jwalterweatherman" + "github.com/spf13/pflag" +) + +// ConfigMarshalError happens when failing to marshal the configuration. +type ConfigMarshalError struct { + err error +} + +// Error returns the formatted configuration error. 
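[editor's note: the parseSizeInBytes and safeMul helpers above back the exported GetSizeInBytes getter defined later in viper.go. A short sketch of the conversion; the key "cache.max-size" is made up for illustration.]

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// "cache.max-size" is an illustrative key.
	viper.Set("cache.max-size", "25 mb")

	// "25 mb" -> 25 * (1 << 20) bytes, per parseSizeInBytes above.
	fmt.Println(viper.GetSizeInBytes("cache.max-size")) // 26214400
}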
+func (e ConfigMarshalError) Error() string { + return fmt.Sprintf("While marshaling config: %s", e.err.Error()) +} + +var v *Viper + +type RemoteResponse struct { + Value []byte + Error error +} + +func init() { + v = New() +} + +type remoteConfigFactory interface { + Get(rp RemoteProvider) (io.Reader, error) + Watch(rp RemoteProvider) (io.Reader, error) + WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) +} + +// RemoteConfig is optional, see the remote package +var RemoteConfig remoteConfigFactory + +// UnsupportedConfigError denotes encountering an unsupported +// configuration filetype. +type UnsupportedConfigError string + +// Error returns the formatted configuration error. +func (str UnsupportedConfigError) Error() string { + return fmt.Sprintf("Unsupported Config Type %q", string(str)) +} + +// UnsupportedRemoteProviderError denotes encountering an unsupported remote +// provider. Currently only etcd and Consul are supported. +type UnsupportedRemoteProviderError string + +// Error returns the formatted remote provider error. +func (str UnsupportedRemoteProviderError) Error() string { + return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) +} + +// RemoteConfigError denotes encountering an error while trying to +// pull the configuration from the remote provider. +type RemoteConfigError string + +// Error returns the formatted remote provider error +func (rce RemoteConfigError) Error() string { + return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) +} + +// ConfigFileNotFoundError denotes failing to find configuration file. +type ConfigFileNotFoundError struct { + name, locations string +} + +// Error returns the formatted configuration error. +func (fnfe ConfigFileNotFoundError) Error() string { + return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations) +} + +// Viper is a prioritized configuration registry. It +// maintains a set of configuration sources, fetches +// values to populate those, and provides them according +// to the source's priority. +// The priority of the sources is the following: +// 1. overrides +// 2. flags +// 3. env. variables +// 4. config file +// 5. key/value store +// 6. defaults +// +// For example, if values from the following sources were loaded: +// +// Defaults : { +// "secret": "", +// "user": "default", +// "endpoint": "https://localhost" +// } +// Config : { +// "user": "root" +// "secret": "defaultsecret" +// } +// Env : { +// "secret": "somesecretkey" +// } +// +// The resulting config will have the following values: +// +// { +// "secret": "somesecretkey", +// "user": "root", +// "endpoint": "https://localhost" +// } +type Viper struct { + // Delimiter that separates a list of keys + // used to access a nested value in one go + keyDelim string + + // A set of paths to look for the config file in + configPaths []string + + // The filesystem to read config from. 
+ fs afero.Fs + + // A set of remote providers to search for the configuration + remoteProviders []*defaultRemoteProvider + + // Name of file to look for inside the path + configName string + configFile string + configType string + envPrefix string + + automaticEnvApplied bool + envKeyReplacer *strings.Replacer + + config map[string]interface{} + override map[string]interface{} + defaults map[string]interface{} + kvstore map[string]interface{} + pflags map[string]FlagValue + env map[string]string + aliases map[string]string + typeByDefValue bool + + // Store read properties on the object so that we can write back in order with comments. + // This will only be used if the configuration read is a properties file. + properties *properties.Properties + + onConfigChange func(fsnotify.Event) +} + +// New returns an initialized Viper instance. +func New() *Viper { + v := new(Viper) + v.keyDelim = "." + v.configName = "config" + v.fs = afero.NewOsFs() + v.config = make(map[string]interface{}) + v.override = make(map[string]interface{}) + v.defaults = make(map[string]interface{}) + v.kvstore = make(map[string]interface{}) + v.pflags = make(map[string]FlagValue) + v.env = make(map[string]string) + v.aliases = make(map[string]string) + v.typeByDefValue = false + + return v +} + +// Intended for testing, will reset all to default settings. +// In the public interface for the viper package so applications +// can use it in their testing as well. +func Reset() { + v = New() + SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} + SupportedRemoteProviders = []string{"etcd", "consul"} +} + +type defaultRemoteProvider struct { + provider string + endpoint string + path string + secretKeyring string +} + +func (rp defaultRemoteProvider) Provider() string { + return rp.provider +} + +func (rp defaultRemoteProvider) Endpoint() string { + return rp.endpoint +} + +func (rp defaultRemoteProvider) Path() string { + return rp.path +} + +func (rp defaultRemoteProvider) SecretKeyring() string { + return rp.secretKeyring +} + +// RemoteProvider stores the configuration necessary +// to connect to a remote key/value store. +// Optional secretKeyring to unencrypt encrypted values +// can be provided. +type RemoteProvider interface { + Provider() string + Endpoint() string + Path() string + SecretKeyring() string +} + +// SupportedExts are universally supported extensions. +var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} + +// SupportedRemoteProviders are universally supported remote providers. 
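[editor's note: a brief sketch of New() above, which builds an isolated Viper instance alongside the package-level singleton created in init(). The key "region" and its values are placeholders.]

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Package-level functions operate on the global Viper.
	viper.Set("region", "us-east-1")

	// New() gives an independent instance with its own overrides, defaults, etc.
	v := viper.New()
	v.Set("region", "eu-west-1")

	fmt.Println(viper.GetString("region"), v.GetString("region")) // us-east-1 eu-west-1
}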
+var SupportedRemoteProviders = []string{"etcd", "consul"} + +func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } +func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { + v.onConfigChange = run +} + +func WatchConfig() { v.WatchConfig() } +func (v *Viper) WatchConfig() { + go func() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way + filename, err := v.getConfigFile() + if err != nil { + log.Println("error:", err) + return + } + + configFile := filepath.Clean(filename) + configDir, _ := filepath.Split(configFile) + + done := make(chan bool) + go func() { + for { + select { + case event := <-watcher.Events: + // we only care about the config file + if filepath.Clean(event.Name) == configFile { + if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create { + err := v.ReadInConfig() + if err != nil { + log.Println("error:", err) + } + v.onConfigChange(event) + } + } + case err := <-watcher.Errors: + log.Println("error:", err) + } + } + }() + + watcher.Add(configDir) + <-done + }() +} + +// SetConfigFile explicitly defines the path, name and extension of the config file. +// Viper will use this and not check any of the config paths. +func SetConfigFile(in string) { v.SetConfigFile(in) } +func (v *Viper) SetConfigFile(in string) { + if in != "" { + v.configFile = in + } +} + +// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use. +// E.g. if your prefix is "spf", the env registry will look for env +// variables that start with "SPF_". +func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } +func (v *Viper) SetEnvPrefix(in string) { + if in != "" { + v.envPrefix = in + } +} + +func (v *Viper) mergeWithEnvPrefix(in string) string { + if v.envPrefix != "" { + return strings.ToUpper(v.envPrefix + "_" + in) + } + + return strings.ToUpper(in) +} + +// TODO: should getEnv logic be moved into find(). Can generalize the use of +// rewriting keys many things, Ex: Get('someKey') -> some_key +// (camel case to snake case for JSON keys perhaps) + +// getEnv is a wrapper around os.Getenv which replaces characters in the original +// key. This allows env vars which have different keys than the config object +// keys. +func (v *Viper) getEnv(key string) string { + if v.envKeyReplacer != nil { + key = v.envKeyReplacer.Replace(key) + } + return os.Getenv(key) +} + +// ConfigFileUsed returns the file used to populate the config registry. +func ConfigFileUsed() string { return v.ConfigFileUsed() } +func (v *Viper) ConfigFileUsed() string { return v.configFile } + +// AddConfigPath adds a path for Viper to search for the config file in. +// Can be called multiple times to define multiple search paths. +func AddConfigPath(in string) { v.AddConfigPath(in) } +func (v *Viper) AddConfigPath(in string) { + if in != "" { + absin := absPathify(in) + jww.INFO.Println("adding", absin, "to paths to search") + if !stringInSlice(absin, v.configPaths) { + v.configPaths = append(v.configPaths, absin) + } + } +} + +// AddRemoteProvider adds a remote configuration source. +// Remote Providers are searched in the order they are added. +// provider is a string value, "etcd" or "consul" are currently supported. +// endpoint is the url. 
etcd requires http://ip:port consul requires ip:port +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp" +func AddRemoteProvider(provider, endpoint, path string) error { + return v.AddRemoteProvider(provider, endpoint, path) +} +func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { + if !stringInSlice(provider, SupportedRemoteProviders) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +// AddSecureRemoteProvider adds a remote configuration source. +// Secure Remote Providers are searched in the order they are added. +// provider is a string value, "etcd" or "consul" are currently supported. +// endpoint is the url. etcd requires http://ip:port consul requires ip:port +// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp" +// Secure Remote Providers are implemented with github.com/xordataexchange/crypt +func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) +} + +func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + if !stringInSlice(provider, SupportedRemoteProviders) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + secretKeyring: secretkeyring, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { + for _, y := range v.remoteProviders { + if reflect.DeepEqual(y, p) { + return true + } + } + return false +} + +// searchMap recursively searches for a value for path in source map. +// Returns nil if not found. +// Note: This assumes that the path entries and map keys are lower cased. +func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} { + if len(path) == 0 { + return source + } + + next, ok := source[path[0]] + if ok { + // Fast path + if len(path) == 1 { + return next + } + + // Nested case + switch next.(type) { + case map[interface{}]interface{}: + return v.searchMap(cast.ToStringMap(next), path[1:]) + case map[string]interface{}: + // Type assertion is safe here since it is only reached + // if the type of `next` is the same as the type being asserted + return v.searchMap(next.(map[string]interface{}), path[1:]) + default: + // got a value but nested key expected, return "nil" for not found + return nil + } + } + return nil +} + +// searchMapWithPathPrefixes recursively searches for a value for path in source map. 
+// +// While searchMap() considers each path element as a single map key, this +// function searches for, and prioritizes, merged path elements. +// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" +// is also defined, this latter value is returned for path ["foo", "bar"]. +// +// This should be useful only at config level (other maps may not contain dots +// in their keys). +// +// Note: This assumes that the path entries and map keys are lower cased. +func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} { + if len(path) == 0 { + return source + } + + // search for path prefixes, starting from the longest one + for i := len(path); i > 0; i-- { + prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) + + next, ok := source[prefixKey] + if ok { + // Fast path + if i == len(path) { + return next + } + + // Nested case + var val interface{} + switch next.(type) { + case map[interface{}]interface{}: + val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:]) + case map[string]interface{}: + // Type assertion is safe here since it is only reached + // if the type of `next` is the same as the type being asserted + val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:]) + default: + // got a value but nested key expected, do nothing and look for next prefix + } + if val != nil { + return val + } + } + } + + // not found + return nil +} + +// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere +// on its path in the map. +// e.g., if "foo.bar" has a value in the given map, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string { + var parentVal interface{} + for i := 1; i < len(path); i++ { + parentVal = v.searchMap(m, path[0:i]) + if parentVal == nil { + // not found, no need to add more path elements + return "" + } + switch parentVal.(type) { + case map[interface{}]interface{}: + continue + case map[string]interface{}: + continue + default: + // parentVal is a regular value which shadows "path" + return strings.Join(path[0:i], v.keyDelim) + } + } + return "" +} + +// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere +// in a sub-path of the map. +// e.g., if "foo.bar" has a value in the given map, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { + // unify input map + var m map[string]interface{} + switch mi.(type) { + case map[string]string, map[string]FlagValue: + m = cast.ToStringMap(mi) + default: + return "" + } + + // scan paths + var parentKey string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if _, ok := m[parentKey]; ok { + return parentKey + } + } + return "" +} + +// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere +// in the environment, when automatic env is on. 
+// e.g., if "foo.bar" has a value in the environment, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInAutoEnv(path []string) string { + var parentKey string + var val string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if val = v.getEnv(v.mergeWithEnvPrefix(parentKey)); val != "" { + return parentKey + } + } + return "" +} + +// SetTypeByDefaultValue enables or disables the inference of a key value's +// type when the Get function is used based upon a key's default value as +// opposed to the value returned based on the normal fetch logic. +// +// For example, if a key has a default value of []string{} and the same key +// is set via an environment variable to "a b c", a call to the Get function +// would return a string slice for the key if the key's type is inferred by +// the default value and the Get function would return: +// +// []string {"a", "b", "c"} +// +// Otherwise the Get function would return: +// +// "a b c" +func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } +func (v *Viper) SetTypeByDefaultValue(enable bool) { + v.typeByDefValue = enable +} + +// GetViper gets the global Viper instance. +func GetViper() *Viper { + return v +} + +// Get can retrieve any value given the key to use. +// Get is case-insensitive for a key. +// Get has the behavior of returning the value associated with the first +// place from where it is set. Viper will check in the following order: +// override, flag, env, config file, key/value store, default +// +// Get returns an interface. For a specific value use one of the Get____ methods. +func Get(key string) interface{} { return v.Get(key) } +func (v *Viper) Get(key string) interface{} { + lcaseKey := strings.ToLower(key) + val := v.find(lcaseKey) + if val == nil { + return nil + } + + if v.typeByDefValue { + // TODO(bep) this branch isn't covered by a single test. + valType := val + path := strings.Split(lcaseKey, v.keyDelim) + defVal := v.searchMap(v.defaults, path) + if defVal != nil { + valType = defVal + } + + switch valType.(type) { + case bool: + return cast.ToBool(val) + case string: + return cast.ToString(val) + case int64, int32, int16, int8, int: + return cast.ToInt(val) + case float64, float32: + return cast.ToFloat64(val) + case time.Time: + return cast.ToTime(val) + case time.Duration: + return cast.ToDuration(val) + case []string: + return cast.ToStringSlice(val) + } + } + + return val +} + +// Sub returns new Viper instance representing a sub tree of this instance. +// Sub is case-insensitive for a key. +func Sub(key string) *Viper { return v.Sub(key) } +func (v *Viper) Sub(key string) *Viper { + subv := New() + data := v.Get(key) + if data == nil { + return nil + } + + if reflect.TypeOf(data).Kind() == reflect.Map { + subv.config = cast.ToStringMap(data) + return subv + } + return nil +} + +// GetString returns the value associated with the key as a string. +func GetString(key string) string { return v.GetString(key) } +func (v *Viper) GetString(key string) string { + return cast.ToString(v.Get(key)) +} + +// GetBool returns the value associated with the key as a boolean. +func GetBool(key string) bool { return v.GetBool(key) } +func (v *Viper) GetBool(key string) bool { + return cast.ToBool(v.Get(key)) +} + +// GetInt returns the value associated with the key as an integer. 
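[editor's note: a sketch of Sub() above, which returns a new Viper rooted at a nested map, or nil when the key is missing or not a map. The "database.*" keys are illustrative.]

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	viper.SetDefault("database.host", "localhost")
	viper.SetDefault("database.port", 5432)

	db := viper.Sub("database")
	if db != nil {
		fmt.Println(db.GetString("host"), db.GetInt("port")) // localhost 5432
	}
}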
+func GetInt(key string) int { return v.GetInt(key) } +func (v *Viper) GetInt(key string) int { + return cast.ToInt(v.Get(key)) +} + +// GetInt64 returns the value associated with the key as an integer. +func GetInt64(key string) int64 { return v.GetInt64(key) } +func (v *Viper) GetInt64(key string) int64 { + return cast.ToInt64(v.Get(key)) +} + +// GetFloat64 returns the value associated with the key as a float64. +func GetFloat64(key string) float64 { return v.GetFloat64(key) } +func (v *Viper) GetFloat64(key string) float64 { + return cast.ToFloat64(v.Get(key)) +} + +// GetTime returns the value associated with the key as time. +func GetTime(key string) time.Time { return v.GetTime(key) } +func (v *Viper) GetTime(key string) time.Time { + return cast.ToTime(v.Get(key)) +} + +// GetDuration returns the value associated with the key as a duration. +func GetDuration(key string) time.Duration { return v.GetDuration(key) } +func (v *Viper) GetDuration(key string) time.Duration { + return cast.ToDuration(v.Get(key)) +} + +// GetStringSlice returns the value associated with the key as a slice of strings. +func GetStringSlice(key string) []string { return v.GetStringSlice(key) } +func (v *Viper) GetStringSlice(key string) []string { + return cast.ToStringSlice(v.Get(key)) +} + +// GetStringMap returns the value associated with the key as a map of interfaces. +func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } +func (v *Viper) GetStringMap(key string) map[string]interface{} { + return cast.ToStringMap(v.Get(key)) +} + +// GetStringMapString returns the value associated with the key as a map of strings. +func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } +func (v *Viper) GetStringMapString(key string) map[string]string { + return cast.ToStringMapString(v.Get(key)) +} + +// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. +func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } +func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { + return cast.ToStringMapStringSlice(v.Get(key)) +} + +// GetSizeInBytes returns the size of the value associated with the given key +// in bytes. +func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } +func (v *Viper) GetSizeInBytes(key string) uint { + sizeStr := cast.ToString(v.Get(key)) + return parseSizeInBytes(sizeStr) +} + +// UnmarshalKey takes a single key and unmarshals it into a Struct. +func UnmarshalKey(key string, rawVal interface{}) error { return v.UnmarshalKey(key, rawVal) } +func (v *Viper) UnmarshalKey(key string, rawVal interface{}) error { + err := decode(v.Get(key), defaultDecoderConfig(rawVal)) + + if err != nil { + return err + } + + v.insensitiviseMaps() + + return nil +} + +// Unmarshal unmarshals the config into a Struct. Make sure that the tags +// on the fields of the structure are properly set. 
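[editor's note: UnmarshalKey/Unmarshal above decode through mapstructure with WeaklyTypedInput enabled, so loosely typed values are coerced into the target struct. The ServerConfig struct and "server.*" keys below are made up for illustration.]

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// ServerConfig is an illustrative target; field names match keys
// case-insensitively via mapstructure.
type ServerConfig struct {
	Host string
	Port int
}

func main() {
	viper.SetDefault("server.host", "0.0.0.0")
	viper.SetDefault("server.port", "8080") // weak typing: the string becomes an int

	var c ServerConfig
	if err := viper.UnmarshalKey("server", &c); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", c) // {Host:0.0.0.0 Port:8080}
}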
+func Unmarshal(rawVal interface{}) error { return v.Unmarshal(rawVal) } +func (v *Viper) Unmarshal(rawVal interface{}) error { + err := decode(v.AllSettings(), defaultDecoderConfig(rawVal)) + + if err != nil { + return err + } + + v.insensitiviseMaps() + + return nil +} + +// defaultDecoderConfig returns default mapsstructure.DecoderConfig with suppot +// of time.Duration values & string slices +func defaultDecoderConfig(output interface{}) *mapstructure.DecoderConfig { + return &mapstructure.DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + ), + } +} + +// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality +func decode(input interface{}, config *mapstructure.DecoderConfig) error { + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + return decoder.Decode(input) +} + +// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent +// in the destination struct. +func (v *Viper) UnmarshalExact(rawVal interface{}) error { + config := defaultDecoderConfig(rawVal) + config.ErrorUnused = true + + err := decode(v.AllSettings(), config) + + if err != nil { + return err + } + + v.insensitiviseMaps() + + return nil +} + +// BindPFlags binds a full flag set to the configuration, using each flag's long +// name as the config key. +func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } +func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { + return v.BindFlagValues(pflagValueSet{flags}) +} + +// BindPFlag binds a specific key to a pflag (as used by cobra). +// Example (where serverCmd is a Cobra instance): +// +// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") +// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) +// +func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } +func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { + return v.BindFlagValue(key, pflagValue{flag}) +} + +// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long +// name as the config key. +func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } +func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { + flags.VisitAll(func(flag FlagValue) { + if err = v.BindFlagValue(flag.Name(), flag); err != nil { + return + } + }) + return nil +} + +// BindFlagValue binds a specific key to a FlagValue. +// Example (where serverCmd is a Cobra instance): +// +// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") +// Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port")) +// +func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } +func (v *Viper) BindFlagValue(key string, flag FlagValue) error { + if flag == nil { + return fmt.Errorf("flag for %q is nil", key) + } + v.pflags[strings.ToLower(key)] = flag + return nil +} + +// BindEnv binds a Viper key to a ENV variable. +// ENV variables are case sensitive. +// If only a key is provided, it will use the env key matching the key, uppercased. +// EnvPrefix will be used when set when env name is not provided. +func BindEnv(input ...string) error { return v.BindEnv(input...) 
} +func (v *Viper) BindEnv(input ...string) error { + var key, envkey string + if len(input) == 0 { + return fmt.Errorf("BindEnv missing key to bind to") + } + + key = strings.ToLower(input[0]) + + if len(input) == 1 { + envkey = v.mergeWithEnvPrefix(key) + } else { + envkey = input[1] + } + + v.env[key] = envkey + + return nil +} + +// Given a key, find the value. +// Viper will check in the following order: +// flag, env, config file, key/value store, default. +// Viper will check to see if an alias exists first. +// Note: this assumes a lower-cased key given. +func (v *Viper) find(lcaseKey string) interface{} { + + var ( + val interface{} + exists bool + path = strings.Split(lcaseKey, v.keyDelim) + nested = len(path) > 1 + ) + + // compute the path through the nested maps to the nested value + if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" { + return nil + } + + // if the requested key is an alias, then return the proper key + lcaseKey = v.realKey(lcaseKey) + path = strings.Split(lcaseKey, v.keyDelim) + nested = len(path) > 1 + + // Set() override first + val = v.searchMap(v.override, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.override) != "" { + return nil + } + + // PFlag override next + flag, exists := v.pflags[lcaseKey] + if exists && flag.HasChanged() { + switch flag.ValueType() { + case "int", "int8", "int16", "int32", "int64": + return cast.ToInt(flag.ValueString()) + case "bool": + return cast.ToBool(flag.ValueString()) + case "stringSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return res + default: + return flag.ValueString() + } + } + if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" { + return nil + } + + // Env override next + if v.automaticEnvApplied { + // even if it hasn't been registered, if automaticEnv is used, + // check any Get request + if val = v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); val != "" { + return val + } + if nested && v.isPathShadowedInAutoEnv(path) != "" { + return nil + } + } + envkey, exists := v.env[lcaseKey] + if exists { + if val = v.getEnv(envkey); val != "" { + return val + } + } + if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { + return nil + } + + // Config file next + val = v.searchMapWithPathPrefixes(v.config, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.config) != "" { + return nil + } + + // K/V store next + val = v.searchMap(v.kvstore, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" { + return nil + } + + // Default next + val = v.searchMap(v.defaults, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" { + return nil + } + + // last chance: if no other value is returned and a flag does exist for the value, + // get the flag's value even if the flag's value has not changed + if flag, exists := v.pflags[lcaseKey]; exists { + switch flag.ValueType() { + case "int", "int8", "int16", "int32", "int64": + return cast.ToInt(flag.ValueString()) + case "bool": + return cast.ToBool(flag.ValueString()) + case "stringSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return res + default: + return flag.ValueString() + } + } + // last item, no need to check shadowing + + return nil +} + +func readAsCSV(val string) ([]string, error) { + if val == "" 
{ + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +// IsSet checks to see if the key has been set in any of the data locations. +// IsSet is case-insensitive for a key. +func IsSet(key string) bool { return v.IsSet(key) } +func (v *Viper) IsSet(key string) bool { + lcaseKey := strings.ToLower(key) + val := v.find(lcaseKey) + return val != nil +} + +// AutomaticEnv has Viper check ENV variables for all. +// keys set in config, default & flags +func AutomaticEnv() { v.AutomaticEnv() } +func (v *Viper) AutomaticEnv() { + v.automaticEnvApplied = true +} + +// SetEnvKeyReplacer sets the strings.Replacer on the viper object +// Useful for mapping an environmental variable to a key that does +// not match it. +func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) } +func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { + v.envKeyReplacer = r +} + +// Aliases provide another accessor for the same key. +// This enables one to change a name without breaking the application +func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) } +func (v *Viper) RegisterAlias(alias string, key string) { + v.registerAlias(alias, strings.ToLower(key)) +} + +func (v *Viper) registerAlias(alias string, key string) { + alias = strings.ToLower(alias) + if alias != key && alias != v.realKey(key) { + _, exists := v.aliases[alias] + + if !exists { + // if we alias something that exists in one of the maps to another + // name, we'll never be able to get that value using the original + // name, so move the config value to the new realkey. + if val, ok := v.config[alias]; ok { + delete(v.config, alias) + v.config[key] = val + } + if val, ok := v.kvstore[alias]; ok { + delete(v.kvstore, alias) + v.kvstore[key] = val + } + if val, ok := v.defaults[alias]; ok { + delete(v.defaults, alias) + v.defaults[key] = val + } + if val, ok := v.override[alias]; ok { + delete(v.override, alias) + v.override[key] = val + } + v.aliases[alias] = key + } + } else { + jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key)) + } +} + +func (v *Viper) realKey(key string) string { + newkey, exists := v.aliases[key] + if exists { + jww.DEBUG.Println("Alias", key, "to", newkey) + return v.realKey(newkey) + } + return key +} + +// InConfig checks to see if the given key (or an alias) is in the config file. +func InConfig(key string) bool { return v.InConfig(key) } +func (v *Viper) InConfig(key string) bool { + // if the requested key is an alias, then return the proper key + key = v.realKey(key) + + _, exists := v.config[key] + return exists +} + +// SetDefault sets the default value for this key. +// SetDefault is case-insensitive for a key. +// Default only used when no value is provided by the user via flag, config or ENV. +func SetDefault(key string, value interface{}) { v.SetDefault(key, value) } +func (v *Viper) SetDefault(key string, value interface{}) { + // If alias passed in, then set the proper default + key = v.realKey(strings.ToLower(key)) + value = toCaseInsensitiveValue(value) + + path := strings.Split(key, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v.defaults, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value +} + +// Set sets the value for the key in the override regiser. +// Set is case-insensitive for a key. 
+// Will be used instead of values obtained via +// flags, config file, ENV, default, or key/value store. +func Set(key string, value interface{}) { v.Set(key, value) } +func (v *Viper) Set(key string, value interface{}) { + // If alias passed in, then set the proper override + key = v.realKey(strings.ToLower(key)) + value = toCaseInsensitiveValue(value) + + path := strings.Split(key, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v.override, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value +} + +// ReadInConfig will discover and load the configuration file from disk +// and key/value stores, searching in one of the defined paths. +func ReadInConfig() error { return v.ReadInConfig() } +func (v *Viper) ReadInConfig() error { + jww.INFO.Println("Attempting to read in config file") + filename, err := v.getConfigFile() + if err != nil { + return err + } + + if !stringInSlice(v.getConfigType(), SupportedExts) { + return UnsupportedConfigError(v.getConfigType()) + } + + jww.DEBUG.Println("Reading file: ", filename) + file, err := afero.ReadFile(v.fs, filename) + if err != nil { + return err + } + + config := make(map[string]interface{}) + + err = v.unmarshalReader(bytes.NewReader(file), config) + if err != nil { + return err + } + + v.config = config + return nil +} + +// MergeInConfig merges a new configuration with an existing config. +func MergeInConfig() error { return v.MergeInConfig() } +func (v *Viper) MergeInConfig() error { + jww.INFO.Println("Attempting to merge in config file") + filename, err := v.getConfigFile() + if err != nil { + return err + } + + if !stringInSlice(v.getConfigType(), SupportedExts) { + return UnsupportedConfigError(v.getConfigType()) + } + + file, err := afero.ReadFile(v.fs, filename) + if err != nil { + return err + } + + return v.MergeConfig(bytes.NewReader(file)) +} + +// ReadConfig will read a configuration file, setting existing keys to nil if the +// key does not exist in the file. +func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } +func (v *Viper) ReadConfig(in io.Reader) error { + v.config = make(map[string]interface{}) + return v.unmarshalReader(in, v.config) +} + +// MergeConfig merges a new configuration with an existing config. +func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } +func (v *Viper) MergeConfig(in io.Reader) error { + if v.config == nil { + v.config = make(map[string]interface{}) + } + cfg := make(map[string]interface{}) + if err := v.unmarshalReader(in, cfg); err != nil { + return err + } + mergeMaps(cfg, v.config, nil) + return nil +} + +// WriteConfig writes the current configuration to a file. +func WriteConfig() error { return v.WriteConfig() } +func (v *Viper) WriteConfig() error { + filename, err := v.getConfigFile() + if err != nil { + return err + } + return v.writeConfig(filename, true) +} + +// SafeWriteConfig writes current configuration to file only if the file does not exist. +func SafeWriteConfig() error { return v.SafeWriteConfig() } +func (v *Viper) SafeWriteConfig() error { + filename, err := v.getConfigFile() + if err != nil { + return err + } + return v.writeConfig(filename, false) +} + +// WriteConfigAs writes current configuration to a given filename. +func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } +func (v *Viper) WriteConfigAs(filename string) error { + return v.writeConfig(filename, true) +} + +// SafeWriteConfigAs writes current configuration to a given filename if it does not exist. 
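+//
+// A minimal usage sketch from a consuming package (illustrative only; the
+// path and key below are assumed for the example, not part of the vendored
+// API):
+//
+//	v := viper.New()
+//	v.Set("app.name", "demo")
+//	// The config type is inferred from the ".yaml" extension. WriteConfigAs
+//	// truncates an existing file; SafeWriteConfigAs refuses to overwrite one.
+//	if err := v.WriteConfigAs("/tmp/app.yaml"); err != nil {
+//		// handle the error
+//	}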
+func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } +func (v *Viper) SafeWriteConfigAs(filename string) error { + return v.writeConfig(filename, false) +} + +func writeConfig(filename string, force bool) error { return v.writeConfig(filename, force) } +func (v *Viper) writeConfig(filename string, force bool) error { + jww.INFO.Println("Attempting to write configuration to file.") + ext := filepath.Ext(filename) + if len(ext) <= 1 { + return fmt.Errorf("Filename: %s requires valid extension.", filename) + } + configType := ext[1:] + if !stringInSlice(configType, SupportedExts) { + return UnsupportedConfigError(configType) + } + if v.config == nil { + v.config = make(map[string]interface{}) + } + var flags int + if force == true { + flags = os.O_CREATE | os.O_TRUNC | os.O_WRONLY + } else { + if _, err := os.Stat(filename); os.IsNotExist(err) { + flags = os.O_WRONLY + } else { + return fmt.Errorf("File: %s exists. Use WriteConfig to overwrite.", filename) + } + } + f, err := v.fs.OpenFile(filename, flags, os.FileMode(0644)) + if err != nil { + return err + } + return v.marshalWriter(f, configType) +} + +// Unmarshal a Reader into a map. +// Should probably be an unexported function. +func unmarshalReader(in io.Reader, c map[string]interface{}) error { + return v.unmarshalReader(in, c) +} +func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { + buf := new(bytes.Buffer) + buf.ReadFrom(in) + + switch strings.ToLower(v.getConfigType()) { + case "yaml", "yml": + if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil { + return ConfigParseError{err} + } + + case "json": + if err := json.Unmarshal(buf.Bytes(), &c); err != nil { + return ConfigParseError{err} + } + + case "hcl": + obj, err := hcl.Parse(string(buf.Bytes())) + if err != nil { + return ConfigParseError{err} + } + if err = hcl.DecodeObject(&c, obj); err != nil { + return ConfigParseError{err} + } + + case "toml": + tree, err := toml.LoadReader(buf) + if err != nil { + return ConfigParseError{err} + } + tmap := tree.ToMap() + for k, v := range tmap { + c[k] = v + } + + case "properties", "props", "prop": + v.properties = properties.NewProperties() + var err error + if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil { + return ConfigParseError{err} + } + for _, key := range v.properties.Keys() { + value, _ := v.properties.Get(key) + // recursively build nested maps + path := strings.Split(key, ".") + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(c, path[0:len(path)-1]) + // set innermost value + deepestMap[lastKey] = value + } + } + + insensitiviseMap(c) + return nil +} + +// Marshal a map into Writer. 
+func marshalWriter(f afero.File, configType string) error { + return v.marshalWriter(f, configType) +} +func (v *Viper) marshalWriter(f afero.File, configType string) error { + c := v.AllSettings() + switch configType { + case "json": + b, err := json.MarshalIndent(c, "", " ") + if err != nil { + return ConfigMarshalError{err} + } + _, err = f.WriteString(string(b)) + if err != nil { + return ConfigMarshalError{err} + } + + case "hcl": + b, err := json.Marshal(c) + ast, err := hcl.Parse(string(b)) + if err != nil { + return ConfigMarshalError{err} + } + err = printer.Fprint(f, ast.Node) + if err != nil { + return ConfigMarshalError{err} + } + + case "prop", "props", "properties": + if v.properties == nil { + v.properties = properties.NewProperties() + } + p := v.properties + for _, key := range v.AllKeys() { + _, _, err := p.Set(key, v.GetString(key)) + if err != nil { + return ConfigMarshalError{err} + } + } + _, err := p.WriteComment(f, "#", properties.UTF8) + if err != nil { + return ConfigMarshalError{err} + } + + case "toml": + t, err := toml.TreeFromMap(c) + if err != nil { + return ConfigMarshalError{err} + } + s := t.String() + if _, err := f.WriteString(s); err != nil { + return ConfigMarshalError{err} + } + + case "yaml", "yml": + b, err := yaml.Marshal(c) + if err != nil { + return ConfigMarshalError{err} + } + if _, err = f.WriteString(string(b)); err != nil { + return ConfigMarshalError{err} + } + } + return nil +} + +func keyExists(k string, m map[string]interface{}) string { + lk := strings.ToLower(k) + for mk := range m { + lmk := strings.ToLower(mk) + if lmk == lk { + return mk + } + } + return "" +} + +func castToMapStringInterface( + src map[interface{}]interface{}) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[fmt.Sprintf("%v", k)] = v + } + return tgt +} + +func castMapStringToMapInterface(src map[string]string) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[k] = v + } + return tgt +} + +func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[k] = v + } + return tgt +} + +// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's +// insistence on parsing nested structures as `map[interface{}]interface{}` +// instead of using a `string` as the key for nest structures beyond one level +// deep. Both map types are supported as there is a go-yaml fork that uses +// `map[string]interface{}` instead. 
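+//
+// A small illustrative example (sketch only; the keys and values are made up):
+//
+//	src := map[string]interface{}{"db": map[string]interface{}{"port": 5432}}
+//	tgt := map[string]interface{}{"db": map[string]interface{}{"host": "x"}}
+//	mergeMaps(src, tgt, nil)
+//	// tgt["db"] now holds both "host" and "port": nested maps of the same
+//	// type are merged recursively, while scalar keys from src overwrite tgt.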
+func mergeMaps( + src, tgt map[string]interface{}, itgt map[interface{}]interface{}) { + for sk, sv := range src { + tk := keyExists(sk, tgt) + if tk == "" { + jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv) + tgt[sk] = sv + if itgt != nil { + itgt[sk] = sv + } + continue + } + + tv, ok := tgt[tk] + if !ok { + jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv) + tgt[sk] = sv + if itgt != nil { + itgt[sk] = sv + } + continue + } + + svType := reflect.TypeOf(sv) + tvType := reflect.TypeOf(tv) + if svType != tvType { + jww.ERROR.Printf( + "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v", + sk, svType, tvType, sv, tv) + continue + } + + jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v", + sk, svType, tvType, sv, tv) + + switch ttv := tv.(type) { + case map[interface{}]interface{}: + jww.TRACE.Printf("merging maps (must convert)") + tsv := sv.(map[interface{}]interface{}) + ssv := castToMapStringInterface(tsv) + stv := castToMapStringInterface(ttv) + mergeMaps(ssv, stv, ttv) + case map[string]interface{}: + jww.TRACE.Printf("merging maps") + mergeMaps(sv.(map[string]interface{}), ttv, nil) + default: + jww.TRACE.Printf("setting value") + tgt[tk] = sv + if itgt != nil { + itgt[tk] = sv + } + } + } +} + +// ReadRemoteConfig attempts to get configuration from a remote source +// and read it in the remote configuration registry. +func ReadRemoteConfig() error { return v.ReadRemoteConfig() } +func (v *Viper) ReadRemoteConfig() error { + return v.getKeyValueConfig() +} + +func WatchRemoteConfig() error { return v.WatchRemoteConfig() } +func (v *Viper) WatchRemoteConfig() error { + return v.watchKeyValueConfig() +} + +func (v *Viper) WatchRemoteConfigOnChannel() error { + return v.watchKeyValueConfigOnChannel() +} + +func (v *Viper) insensitiviseMaps() { + insensitiviseMap(v.config) + insensitiviseMap(v.defaults) + insensitiviseMap(v.override) + insensitiviseMap(v.kvstore) +} + +// Retrieve the first found remote configuration. +func (v *Viper) getKeyValueConfig() error { + if RemoteConfig == nil { + return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") + } + + for _, rp := range v.remoteProviders { + val, err := v.getRemoteConfig(rp) + if err != nil { + continue + } + v.kvstore = val + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { + reader, err := RemoteConfig.Get(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} + +// Retrieve the first found remote configuration. +func (v *Viper) watchKeyValueConfigOnChannel() error { + for _, rp := range v.remoteProviders { + respc, _ := RemoteConfig.WatchChannel(rp) + //Todo: Add quit channel + go func(rc <-chan *RemoteResponse) { + for { + b := <-rc + reader := bytes.NewReader(b.Value) + v.unmarshalReader(reader, v.kvstore) + } + }(respc) + return nil + } + return RemoteConfigError("No Files Found") +} + +// Retrieve the first found remote configuration. 
+func (v *Viper) watchKeyValueConfig() error { + for _, rp := range v.remoteProviders { + val, err := v.watchRemoteConfig(rp) + if err != nil { + continue + } + v.kvstore = val + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { + reader, err := RemoteConfig.Watch(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} + +// AllKeys returns all keys holding a value, regardless of where they are set. +// Nested keys are returned with a v.keyDelim (= ".") separator +func AllKeys() []string { return v.AllKeys() } +func (v *Viper) AllKeys() []string { + m := map[string]bool{} + // add all paths, by order of descending priority to ensure correct shadowing + m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") + m = v.flattenAndMergeMap(m, v.override, "") + m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) + m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env)) + m = v.flattenAndMergeMap(m, v.config, "") + m = v.flattenAndMergeMap(m, v.kvstore, "") + m = v.flattenAndMergeMap(m, v.defaults, "") + + // convert set of paths to list + a := []string{} + for x := range m { + a = append(a, x) + } + return a +} + +// flattenAndMergeMap recursively flattens the given map into a map[string]bool +// of key paths (used as a set, easier to manipulate than a []string): +// - each path is merged into a single key string, delimited with v.keyDelim (= ".") +// - if a path is shadowed by an earlier value in the initial shadow map, +// it is skipped. +// The resulting set of paths is merged to the given shadow set at the same time. +func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool { + if shadow != nil && prefix != "" && shadow[prefix] { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]bool) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += v.keyDelim + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = true + continue + } + // recursively merge to shadow map + shadow = v.flattenAndMergeMap(shadow, m2, fullKey) + } + return shadow +} + +// mergeFlatMap merges the given maps, excluding values of the second map +// shadowed by values from the first map. +func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool { + // scan keys +outer: + for k, _ := range m { + path := strings.Split(k, v.keyDelim) + // scan intermediate paths + var parentKey string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if shadow[parentKey] { + // path is shadowed, continue + continue outer + } + } + // add key + shadow[strings.ToLower(k)] = true + } + return shadow +} + +// AllSettings merges all settings and returns them as a map[string]interface{}. 
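+//
+// A minimal sketch of the resulting shape (the keys here are invented for the
+// example):
+//
+//	v.SetDefault("server.host", "localhost")
+//	v.SetDefault("server.port", 8080)
+//	v.AllSettings()
+//	// map[string]interface{}{"server": map[string]interface{}{
+//	//	"host": "localhost", "port": 8080}}
+//
+// Dotted keys from AllKeys are re-expanded into nested maps via deepSearch.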
+func AllSettings() map[string]interface{} { return v.AllSettings() } +func (v *Viper) AllSettings() map[string]interface{} { + m := map[string]interface{}{} + // start from the list of keys, and construct the map one value at a time + for _, k := range v.AllKeys() { + value := v.Get(k) + if value == nil { + // should not happen, since AllKeys() returns only keys holding a value, + // check just in case anything changes + continue + } + path := strings.Split(k, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(m, path[0:len(path)-1]) + // set innermost value + deepestMap[lastKey] = value + } + return m +} + +// SetFs sets the filesystem to use to read configuration. +func SetFs(fs afero.Fs) { v.SetFs(fs) } +func (v *Viper) SetFs(fs afero.Fs) { + v.fs = fs +} + +// SetConfigName sets name for the config file. +// Does not include extension. +func SetConfigName(in string) { v.SetConfigName(in) } +func (v *Viper) SetConfigName(in string) { + if in != "" { + v.configName = in + v.configFile = "" + } +} + +// SetConfigType sets the type of the configuration returned by the +// remote source, e.g. "json". +func SetConfigType(in string) { v.SetConfigType(in) } +func (v *Viper) SetConfigType(in string) { + if in != "" { + v.configType = in + } +} + +func (v *Viper) getConfigType() string { + if v.configType != "" { + return v.configType + } + + cf, err := v.getConfigFile() + if err != nil { + return "" + } + + ext := filepath.Ext(cf) + + if len(ext) > 1 { + return ext[1:] + } + + return "" +} + +func (v *Viper) getConfigFile() (string, error) { + // if explicitly set, then use it + if v.configFile != "" { + return v.configFile, nil + } + + cf, err := v.findConfigFile() + if err != nil { + return "", err + } + + v.configFile = cf + return v.getConfigFile() +} + +func (v *Viper) searchInPath(in string) (filename string) { + jww.DEBUG.Println("Searching for config in ", in) + for _, ext := range SupportedExts { + jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext)) + if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { + jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext)) + return filepath.Join(in, v.configName+"."+ext) + } + } + + return "" +} + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). +func (v *Viper) findConfigFile() (string, error) { + jww.INFO.Println("Searching for config in ", v.configPaths) + + for _, cp := range v.configPaths { + file := v.searchInPath(cp) + if file != "" { + return file, nil + } + } + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} +} + +// Debug prints all configuration registries for debugging +// purposes. +func Debug() { v.Debug() } +func (v *Viper) Debug() { + fmt.Printf("Aliases:\n%#v\n", v.aliases) + fmt.Printf("Override:\n%#v\n", v.override) + fmt.Printf("PFlags:\n%#v\n", v.pflags) + fmt.Printf("Env:\n%#v\n", v.env) + fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore) + fmt.Printf("Config:\n%#v\n", v.config) + fmt.Printf("Defaults:\n%#v\n", v.defaults) +} diff --git a/vendor/github.com/xordataexchange/crypt/backend/LICENSE b/vendor/github.com/xordataexchange/crypt/backend/LICENSE new file mode 100644 index 00000000..43846317 --- /dev/null +++ b/vendor/github.com/xordataexchange/crypt/backend/LICENSE @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2014 XOR Data Exchange, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/xordataexchange/crypt/backend/backend.go b/vendor/github.com/xordataexchange/crypt/backend/backend.go new file mode 100644 index 00000000..bfe894dc --- /dev/null +++ b/vendor/github.com/xordataexchange/crypt/backend/backend.go @@ -0,0 +1,32 @@ +// Package backend provides the K/V store interface for crypt backends. +package backend + +// Response represents a response from a backend store. +type Response struct { + Value []byte + Error error +} + +// KVPair holds both a key and value when reading a list. +type KVPair struct { + Key string + Value []byte +} + +type KVPairs []*KVPair + +// A Store is a K/V store backend that retrieves and sets, and monitors +// data in a K/V store. +type Store interface { + // Get retrieves a value from a K/V store for the provided key. + Get(key string) ([]byte, error) + + // List retrieves all keys and values under a provided key. + List(key string) (KVPairs, error) + + // Set sets the provided key to value. + Set(key string, value []byte) error + + // Watch monitors a K/V store for changes to key. 
+ Watch(key string, stop chan bool) <-chan *Response +} diff --git a/vendor/github.com/xordataexchange/crypt/backend/consul/consul.go b/vendor/github.com/xordataexchange/crypt/backend/consul/consul.go new file mode 100644 index 00000000..cf85ed53 --- /dev/null +++ b/vendor/github.com/xordataexchange/crypt/backend/consul/consul.go @@ -0,0 +1,87 @@ +package consul + +import ( + "fmt" + "strings" + "time" + + "github.com/xordataexchange/crypt/backend" + + "github.com/armon/consul-api" +) + +type Client struct { + client *consulapi.KV + waitIndex uint64 +} + +func New(machines []string) (*Client, error) { + conf := consulapi.DefaultConfig() + if len(machines) > 0 { + conf.Address = machines[0] + } + client, err := consulapi.NewClient(conf) + if err != nil { + return nil, err + } + return &Client{client.KV(), 0}, nil +} + +func (c *Client) Get(key string) ([]byte, error) { + kv, _, err := c.client.Get(key, nil) + if err != nil { + return nil, err + } + if kv == nil { + return nil, fmt.Errorf("Key ( %s ) was not found.", key) + } + return kv.Value, nil +} + +func (c *Client) List(key string) (backend.KVPairs, error) { + pairs, _, err := c.client.List(key, nil) + if err != nil { + return nil, err + } + if err != nil { + return nil, err + } + ret := make(backend.KVPairs, len(pairs), len(pairs)) + for i, kv := range pairs { + ret[i] = &backend.KVPair{Key: kv.Key, Value: kv.Value} + } + return ret, nil +} + +func (c *Client) Set(key string, value []byte) error { + key = strings.TrimPrefix(key, "/") + kv := &consulapi.KVPair{ + Key: key, + Value: value, + } + _, err := c.client.Put(kv, nil) + return err +} + +func (c *Client) Watch(key string, stop chan bool) <-chan *backend.Response { + respChan := make(chan *backend.Response, 0) + go func() { + for { + opts := consulapi.QueryOptions{ + WaitIndex: c.waitIndex, + } + keypair, meta, err := c.client.Get(key, &opts) + if keypair == nil && err == nil { + err = fmt.Errorf("Key ( %s ) was not found.", key) + } + if err != nil { + respChan <- &backend.Response{nil, err} + time.Sleep(time.Second * 5) + continue + } + c.waitIndex = meta.LastIndex + respChan <- &backend.Response{keypair.Value, nil} + } + }() + return respChan +} diff --git a/vendor/github.com/xordataexchange/crypt/backend/etcd/etcd.go b/vendor/github.com/xordataexchange/crypt/backend/etcd/etcd.go new file mode 100644 index 00000000..18f35510 --- /dev/null +++ b/vendor/github.com/xordataexchange/crypt/backend/etcd/etcd.go @@ -0,0 +1,116 @@ +package etcd + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/xordataexchange/crypt/backend" + + goetcd "github.com/coreos/etcd/client" +) + +type Client struct { + client goetcd.Client + keysAPI goetcd.KeysAPI + waitIndex uint64 +} + +func New(machines []string) (*Client, error) { + newClient, err := goetcd.New(goetcd.Config{ + Endpoints: machines, + }) + if err != nil { + return nil, fmt.Errorf("creating new etcd client for crypt.backend.Client: %v", err) + } + keysAPI := goetcd.NewKeysAPI(newClient) + return &Client{client: newClient, keysAPI: keysAPI, waitIndex: 0}, nil +} + +func (c *Client) Get(key string) ([]byte, error) { + return c.GetWithContext(context.TODO(), key) +} + +func (c *Client) GetWithContext(ctx context.Context, key string) ([]byte, error) { + resp, err := c.keysAPI.Get(ctx, key, nil) + if err != nil { + return nil, err + } + return []byte(resp.Node.Value), nil +} + +func addKVPairs(node *goetcd.Node, list backend.KVPairs) backend.KVPairs { + if node.Dir { + for _, n := range node.Nodes { + list = addKVPairs(n, list) 
+ } + return list + } + return append(list, &backend.KVPair{Key: node.Key, Value: []byte(node.Value)}) +} + +func (c *Client) List(key string) (backend.KVPairs, error) { + return c.ListWithContext(context.TODO(), key) +} + +func (c *Client) ListWithContext(ctx context.Context, key string) (backend.KVPairs, error) { + resp, err := c.keysAPI.Get(ctx, key, nil) + if err != nil { + return nil, err + } + if !resp.Node.Dir { + return nil, errors.New("key is not a directory") + } + list := addKVPairs(resp.Node, nil) + return list, nil +} + +func (c *Client) Set(key string, value []byte) error { + return c.SetWithContext(context.TODO(), key, value) +} + +func (c *Client) SetWithContext(ctx context.Context, key string, value []byte) error { + _, err := c.keysAPI.Set(ctx, key, string(value), nil) + return err +} + +func (c *Client) Watch(key string, stop chan bool) <-chan *backend.Response { + return c.WatchWithContext(context.Background(), key, stop) +} + +func (c *Client) WatchWithContext(ctx context.Context, key string, stop chan bool) <-chan *backend.Response { + respChan := make(chan *backend.Response, 0) + go func() { + watcher := c.keysAPI.Watcher(key, nil) + ctx, cancel := context.WithCancel(ctx) + go func() { + <-stop + cancel() + }() + for { + var resp *goetcd.Response + var err error + // if c.waitIndex == 0 { + // resp, err = c.client.Get(key, false, false) + // if err != nil { + // respChan <- &backend.Response{nil, err} + // time.Sleep(time.Second * 5) + // continue + // } + // c.waitIndex = resp.EtcdIndex + // respChan <- &backend.Response{[]byte(resp.Node.Value), nil} + // } + // resp, err = c.client.Watch(key, c.waitIndex+1, false, nil, stop) + resp, err = watcher.Next(ctx) + if err != nil { + respChan <- &backend.Response{nil, err} + time.Sleep(time.Second * 5) + continue + } + c.waitIndex = resp.Node.ModifiedIndex + respChan <- &backend.Response{[]byte(resp.Node.Value), nil} + } + }() + return respChan +} diff --git a/vendor/github.com/xordataexchange/crypt/backend/mock/mock.go b/vendor/github.com/xordataexchange/crypt/backend/mock/mock.go new file mode 100644 index 00000000..68a9b1c7 --- /dev/null +++ b/vendor/github.com/xordataexchange/crypt/backend/mock/mock.go @@ -0,0 +1,61 @@ +package mock + +import ( + "errors" + "path" + "strings" + "time" + + "github.com/xordataexchange/crypt/backend" +) + +var mockedStore map[string][]byte + +type Client struct{} + +func New(machines []string) (*Client, error) { + if mockedStore == nil { + mockedStore = make(map[string][]byte, 2) + } + return &Client{}, nil +} + +func (c *Client) Get(key string) ([]byte, error) { + if v, ok := mockedStore[key]; ok { + return v, nil + } + err := errors.New("Could not find key: " + key) + return nil, err +} + +func (c *Client) List(key string) (backend.KVPairs, error) { + var list backend.KVPairs + dir := path.Clean(key) + "/" + for k, v := range mockedStore { + if strings.HasPrefix(k, dir) { + list = append(list, &backend.KVPair{Key: k, Value: v}) + } + } + return list, nil +} + +func (c *Client) Set(key string, value []byte) error { + mockedStore[key] = value + return nil +} + +func (c *Client) Watch(key string, stop chan bool) <-chan *backend.Response { + respChan := make(chan *backend.Response, 0) + go func() { + for { + b, err := c.Get(key) + if err != nil { + respChan <- &backend.Response{nil, err} + time.Sleep(time.Second * 5) + continue + } + respChan <- &backend.Response{b, nil} + } + }() + return respChan +} diff --git a/vendor/github.com/xordataexchange/crypt/config/LICENSE 
b/vendor/github.com/xordataexchange/crypt/config/LICENSE new file mode 100644 index 00000000..43846317 --- /dev/null +++ b/vendor/github.com/xordataexchange/crypt/config/LICENSE @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2014 XOR Data Exchange, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/xordataexchange/crypt/config/config.go b/vendor/github.com/xordataexchange/crypt/config/config.go new file mode 100644 index 00000000..30864ae9 --- /dev/null +++ b/vendor/github.com/xordataexchange/crypt/config/config.go @@ -0,0 +1,201 @@ +package config + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/xordataexchange/crypt/backend" + "github.com/xordataexchange/crypt/backend/consul" + "github.com/xordataexchange/crypt/backend/etcd" + "github.com/xordataexchange/crypt/encoding/secconf" +) + +type KVPair struct { + backend.KVPair +} + +type KVPairs []*KVPair + +type configManager struct { + keystore []byte + store backend.Store +} + +// A ConfigManager retrieves and decrypts configuration from a key/value store. +type ConfigManager interface { + Get(key string) ([]byte, error) + List(key string) (KVPairs, error) + Set(key string, value []byte) error + Watch(key string, stop chan bool) <-chan *Response +} + +type standardConfigManager struct { + store backend.Store +} + +func NewStandardConfigManager(client backend.Store) (ConfigManager, error) { + return standardConfigManager{client}, nil +} + +func NewConfigManager(client backend.Store, keystore io.Reader) (ConfigManager, error) { + bytes, err := ioutil.ReadAll(keystore) + if err != nil { + return nil, err + } + return configManager{bytes, client}, nil +} + +// NewStandardEtcdConfigManager returns a new ConfigManager backed by etcd. +func NewStandardEtcdConfigManager(machines []string) (ConfigManager, error) { + store, err := etcd.New(machines) + if err != nil { + return nil, err + } + + return NewStandardConfigManager(store) +} + +// NewStandardConsulConfigManager returns a new ConfigManager backed by consul. +func NewStandardConsulConfigManager(machines []string) (ConfigManager, error) { + store, err := consul.New(machines) + if err != nil { + return nil, err + } + return NewStandardConfigManager(store) +} + +// NewEtcdConfigManager returns a new ConfigManager backed by etcd. +// Data will be encrypted. 
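+//
+// A minimal usage sketch (the endpoint, keyring path, and key are
+// illustrative assumptions, not fixed by this package):
+//
+//	kr, err := os.Open("/path/to/keyring.gpg")
+//	if err != nil { /* handle the error */ }
+//	cm, err := NewEtcdConfigManager([]string{"http://127.0.0.1:2379"}, kr)
+//	if err != nil { /* handle the error */ }
+//	plaintext, err := cm.Get("/config/app.json") // value is decoded via secconf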
+func NewEtcdConfigManager(machines []string, keystore io.Reader) (ConfigManager, error) { + store, err := etcd.New(machines) + if err != nil { + return nil, err + } + return NewConfigManager(store, keystore) +} + +// NewConsulConfigManager returns a new ConfigManager backed by consul. +// Data will be encrypted. +func NewConsulConfigManager(machines []string, keystore io.Reader) (ConfigManager, error) { + store, err := consul.New(machines) + if err != nil { + return nil, err + } + return NewConfigManager(store, keystore) +} + +// Get retrieves and decodes a secconf value stored at key. +func (c configManager) Get(key string) ([]byte, error) { + value, err := c.store.Get(key) + if err != nil { + return nil, err + } + return secconf.Decode(value, bytes.NewBuffer(c.keystore)) +} + +// Get retrieves a value stored at key. +// convenience function, no additional value provided over +// `etcdctl` +func (c standardConfigManager) Get(key string) ([]byte, error) { + value, err := c.store.Get(key) + if err != nil { + return nil, err + } + return value, err +} + +// List retrieves and decodes all secconf value stored under key. +func (c configManager) List(key string) (KVPairs, error) { + list, err := c.store.List(key) + retList := make(KVPairs, len(list)) + if err != nil { + return nil, err + } + for i, kv := range list { + retList[i].Key = kv.Key + retList[i].Value, err = secconf.Decode(kv.Value, bytes.NewBuffer(c.keystore)) + if err != nil { + return nil, err + } + } + return retList, nil +} + +// List retrieves all values under key. +// convenience function, no additional value provided over +// `etcdctl` +func (c standardConfigManager) List(key string) (KVPairs, error) { + list, err := c.store.List(key) + retList := make(KVPairs, len(list)) + if err != nil { + return nil, err + } + for i, kv := range list { + retList[i].Key = kv.Key + retList[i].Value = kv.Value + } + return retList, err +} + +// Set will put a key/value into the data store +// and encode it with secconf +func (c configManager) Set(key string, value []byte) error { + encodedValue, err := secconf.Encode(value, bytes.NewBuffer(c.keystore)) + if err == nil { + err = c.store.Set(key, encodedValue) + } + return err +} + +// Set will put a key/value into the data store +func (c standardConfigManager) Set(key string, value []byte) error { + err := c.store.Set(key, value) + return err +} + +type Response struct { + Value []byte + Error error +} + +func (c configManager) Watch(key string, stop chan bool) <-chan *Response { + resp := make(chan *Response, 0) + backendResp := c.store.Watch(key, stop) + go func() { + for { + select { + case <-stop: + return + case r := <-backendResp: + if r.Error != nil { + resp <- &Response{nil, r.Error} + continue + } + value, err := secconf.Decode(r.Value, bytes.NewBuffer(c.keystore)) + resp <- &Response{value, err} + } + } + }() + return resp +} + +func (c standardConfigManager) Watch(key string, stop chan bool) <-chan *Response { + resp := make(chan *Response, 0) + backendResp := c.store.Watch(key, stop) + go func() { + for { + select { + case <-stop: + return + case r := <-backendResp: + if r.Error != nil { + resp <- &Response{nil, r.Error} + continue + } + resp <- &Response{r.Value, nil} + } + } + }() + return resp +} diff --git a/vendor/github.com/xordataexchange/crypt/encoding/secconf/LICENSE b/vendor/github.com/xordataexchange/crypt/encoding/secconf/LICENSE new file mode 100644 index 00000000..43846317 --- /dev/null +++ b/vendor/github.com/xordataexchange/crypt/encoding/secconf/LICENSE @@ -0,0 
+1,9 @@ +The MIT License (MIT) + +Copyright (c) 2014 XOR Data Exchange, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/xordataexchange/crypt/encoding/secconf/secconf.go b/vendor/github.com/xordataexchange/crypt/encoding/secconf/secconf.go new file mode 100644 index 00000000..6e94565d --- /dev/null +++ b/vendor/github.com/xordataexchange/crypt/encoding/secconf/secconf.go @@ -0,0 +1,68 @@ +// Package secconf implements secconf encoding as specified in the following +// format: +// +// base64(gpg(gzip(data))) +// +package secconf + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "io" + "io/ioutil" + + "golang.org/x/crypto/openpgp" +) + +// Deocde decodes data using the secconf codec. +func Decode(data []byte, secertKeyring io.Reader) ([]byte, error) { + decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewBuffer(data)) + entityList, err := openpgp.ReadArmoredKeyRing(secertKeyring) + if err != nil { + return nil, err + } + md, err := openpgp.ReadMessage(decoder, entityList, nil, nil) + if err != nil { + return nil, err + } + gzReader, err := gzip.NewReader(md.UnverifiedBody) + if err != nil { + return nil, err + } + defer gzReader.Close() + bytes, err := ioutil.ReadAll(gzReader) + if err != nil { + return nil, err + } + return bytes, nil +} + +// Encode encodes data to a base64 encoded using the secconf codec. +// data is encrypted with all public keys found in the supplied keyring. +func Encode(data []byte, keyring io.Reader) ([]byte, error) { + entityList, err := openpgp.ReadArmoredKeyRing(keyring) + if err != nil { + return nil, err + } + buffer := new(bytes.Buffer) + encoder := base64.NewEncoder(base64.StdEncoding, buffer) + pgpWriter, err := openpgp.Encrypt(encoder, entityList, nil, nil, nil) + if err != nil { + return nil, err + } + gzWriter := gzip.NewWriter(pgpWriter) + if _, err := gzWriter.Write(data); err != nil { + return nil, err + } + if err := gzWriter.Close(); err != nil { + return nil, err + } + if err := pgpWriter.Close(); err != nil { + return nil, err + } + if err := encoder.Close(); err != nil { + return nil, err + } + return buffer.Bytes(), nil +} diff --git a/vendor/golang.org/x/crypto/cast5/LICENSE b/vendor/golang.org/x/crypto/cast5/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/crypto/cast5/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go new file mode 100644 index 00000000..0b4af37b --- /dev/null +++ b/vendor/golang.org/x/crypto/cast5/cast5.go @@ -0,0 +1,526 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common +// OpenPGP cipher. 
+package cast5 // import "golang.org/x/crypto/cast5" + +import "errors" + +const BlockSize = 8 +const KeySize = 16 + +type Cipher struct { + masking [16]uint32 + rotate [16]uint8 +} + +func NewCipher(key []byte) (c *Cipher, err error) { + if len(key) != KeySize { + return nil, errors.New("CAST5: keys must be 16 bytes") + } + + c = new(Cipher) + c.keySchedule(key) + return +} + +func (c *Cipher) BlockSize() int { + return BlockSize +} + +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +type keyScheduleA [4][7]uint8 +type keyScheduleB [4][5]uint8 + +// keyScheduleRound contains the magic values for a round of the key schedule. +// The keyScheduleA deals with the lines like: +// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] +// Conceptually, both x and z are in the same array, x first. The first +// element describes which word of this array gets written to and the +// second, which word gets read. So, for the line above, it's "4, 0", because +// it's writing to the first word of z, which, being after x, is word 4, and +// reading from the first word of x: word 0. +// +// Next are the indexes into the S-boxes. Now the array is treated as bytes. So +// "xD" is 0xd. 
The first byte of z is written as "16 + 0", just to be clear +// that it's z that we're indexing. +// +// keyScheduleB deals with lines like: +// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] +// "K1" is ignored because key words are always written in order. So the five +// elements are the S-box indexes. They use the same form as in keyScheduleA, +// above. + +type keyScheduleRound struct{} +type keySchedule []keyScheduleRound + +var schedule = []struct { + a keyScheduleA + b keyScheduleB +}{ + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, + {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, + {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, + {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {3, 2, 0xc, 0xd, 8}, + {1, 0, 0xe, 0xf, 0xd}, + {7, 6, 8, 9, 3}, + {5, 4, 0xa, 0xb, 7}, + }, + }, + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, + {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, + {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, + {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {8, 9, 7, 6, 3}, + {0xa, 0xb, 5, 4, 7}, + {0xc, 0xd, 3, 2, 8}, + {0xe, 0xf, 1, 0, 0xd}, + }, + }, +} + +func (c *Cipher) keySchedule(in []byte) { + var t [8]uint32 + var k [32]uint32 + + for i := 0; i < 4; i++ { + j := i * 4 + t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) + } + + x := []byte{6, 7, 4, 5} + ki := 0 + + for half := 0; half < 2; half++ { + for _, round := range schedule { + for j := 0; j < 4; j++ { + var a [7]uint8 + copy(a[:], round.a[j][:]) + w := t[a[1]] + w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] + w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] + w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] + w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] + w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] + t[a[0]] = w + } + + for j := 0; j < 4; j++ { + var b [5]uint8 + copy(b[:], round.b[j][:]) + w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] + w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] + w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] + w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] + w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] + k[ki] = w + ki++ + } + } + } + + for i := 0; i < 16; i++ { + c.masking[i] = k[i] + c.rotate[i] = uint8(k[16+i] & 0x1f) + } +} + +// These are the three 'f' functions. See RFC 2144, section 2.2. 
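+//
+// As a sketch of their shared structure (rotl32 below is shorthand for a
+// 32-bit left rotation, not a function in this file): each round function
+// combines the data half d with the round's masking key m and rotation r,
+// e.g. for f1:
+//
+//	I  := rotl32(m+d, r)
+//	f1 := ((S1[I>>24] ^ S2[(I>>16)&0xff]) - S3[(I>>8)&0xff]) + S4[I&0xff]
+//
+// where S1..S4 correspond to sBox[0..3] below; f2 and f3 follow the same
+// pattern with the +, ^ and - operations rotated.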
+func f1(d, m uint32, r uint8) uint32 { + t := m + d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] +} + +func f2(d, m uint32, r uint8) uint32 { + t := m ^ d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] +} + +func f3(d, m uint32, r uint8) uint32 { + t := m - d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] +} + +var sBox = [8][256]uint32{ + { + 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, + 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, + 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, + 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, + 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, + 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, + 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, + 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, + 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, + 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, + 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, + 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, + 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, + 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, + 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, + 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, + 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, + 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, + 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, + 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, + 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, + 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, + 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, + 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, + 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, + 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, + 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, + 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, + 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, + 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, + 0xbd91e046, 0x9a56456e, 
0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, + 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, + }, + { + 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, + 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, + 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, + 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, + 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, + 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, + 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, + 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, + 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, + 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, + 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, + 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, + 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, + 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, + 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, + 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, + 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, + 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, + 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, + 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, + 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, + 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, + 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, + 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, + 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, + 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, + 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, + 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, + 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, + 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, + 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, + 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, + }, + { + 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, + 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, + 0x11107d9f, 0x07647db9, 
0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, + 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, + 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, + 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, + 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, + 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, + 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, + 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, + 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, + 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, + 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, + 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, + 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, + 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, + 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, + 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, + 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, + 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, + 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, + 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, + 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, + 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, + 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, + 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, + 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, + 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, + 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, + 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, + 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, + 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, + }, + { + 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, + 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, + 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, + 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, + 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, + 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, + 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 
0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, + 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, + 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, + 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, + 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, + 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, + 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, + 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, + 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, + 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, + 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, + 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, + 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, + 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, + 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, + 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, + 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, + 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, + 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, + 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, + 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, + 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, + 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, + 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, + 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, + 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, + }, + { + 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, + 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, + 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, + 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, + 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, + 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, + 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, + 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, + 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, + 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, + 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 
0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, + 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, + 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, + 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, + 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, + 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, + 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, + 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, + 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, + 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, + 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, + 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, + 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, + 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, + 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, + 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, + 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, + 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, + 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, + 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, + 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, + 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, + }, + { + 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, + 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, + 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, + 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, + 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, + 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, + 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, + 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, + 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, + 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, + 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, + 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, + 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, + 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, + 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 
0xa8dc8af0, 0x7345c106, 0xf41e232f, + 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, + 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, + 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, + 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, + 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, + 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, + 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, + 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, + 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, + 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, + 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, + 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, + 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, + 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, + 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, + 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, + 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, + }, + { + 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, + 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, + 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, + 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, + 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, + 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, + 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, + 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, + 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, + 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, + 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, + 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, + 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, + 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, + 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, + 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, + 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, + 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, + 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 
0x488dcf25, 0x36c9d566, + 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, + 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, + 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, + 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, + 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, + 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, + 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, + 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, + 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, + 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, + 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, + 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, + 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, + }, + { + 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, + 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, + 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, + 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, + 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, + 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, + 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, + 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, + 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, + 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, + 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, + 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, + 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, + 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, + 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, + 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, + 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, + 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, + 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, + 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, + 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, + 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, + 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 
0x2eda7fa4, + 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, + 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, + 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, + 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, + 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, + 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, + 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, + 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, + 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, + }, +} diff --git a/vendor/golang.org/x/crypto/openpgp/LICENSE b/vendor/golang.org/x/crypto/openpgp/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go new file mode 100644 index 00000000..592d1864 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -0,0 +1,219 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +package armor // import "golang.org/x/crypto/openpgp/armor" + +import ( + "bufio" + "bytes" + "encoding/base64" + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// A Block represents an OpenPGP armored structure. 
+// +// The encoded form is: +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// where Headers is a possibly empty sequence of Key: Value lines. +// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF { + if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. 
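For orientation, Encode (added below in encode.go) and Decode form a streaming armor round trip. A minimal sketch of how a caller might drive them; the block type, header value and payload are illustrative and not taken from the vendored code:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer

	// Armor an arbitrary payload under an illustrative block type.
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Version": "example"})
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello, armor")); err != nil {
		panic(err)
	}
	w.Close() // emits the '=' CRC24 line and the END trailer

	// Decode it again; Body streams the base64-decoded payload and the
	// CRC recorded by lineReader is checked when Body reaches EOF.
	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, err := ioutil.ReadAll(block.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(block.Type, string(body))
}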
+func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimSpace(line) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(": ")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + p.Header[lastKey] = string(line[i+2:]) + } + + p.lReader.in = r + p.oReader.currentCRC = crc24Init + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go new file mode 100644 index 00000000..6f07582c --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/armor/encode.go @@ -0,0 +1,160 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +// writeSlices writes its arguments to the given Writer. +func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. +type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. 
+// +// It's built into a stack of io.Writers: +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/golang.org/x/crypto/openpgp/canonical_text.go new file mode 100644 index 00000000..e601e389 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/canonical_text.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import "hash" + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + start := 0 + + for i, c := range buf { + switch cth.s { + case 0: + if c == '\r' { + cth.s = 1 + } else if c == '\n' { + cth.h.Write(buf[start:i]) + cth.h.Write(newline) + start = i + 1 + } + case 1: + cth.s = 0 + } + } + + cth.h.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go new file mode 100644 index 00000000..def4caba --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go @@ -0,0 +1,376 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clearsign generates and processes OpenPGP, clear-signed data. See +// RFC 4880, section 7. 
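A usage sketch for the clearsign flow vendored in this file: sign with Encode, re-parse with Decode, then check the detached signature over Bytes with openpgp.CheckDetachedSignature, as the Block comment below suggests (that verifier lives in the openpgp package outside this hunk, so its exact signature is assumed). The identity and message are illustrative:

package main

import (
	"bytes"
	_ "crypto/sha256" // assumed default hash; registered so signing does not fail
	"fmt"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/clearsign"
)

func main() {
	// A throwaway key pair; see keys.go later in this patch.
	entity, err := openpgp.NewEntity("Alice", "clearsign demo", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Clear-sign a message.
	var signed bytes.Buffer
	w, err := clearsign.Encode(&signed, entity.PrivateKey, nil)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("an important message\n"))
	w.Close() // appends the armored signature block

	// Parse it back and verify the signature over the canonicalized text.
	block, _ := clearsign.Decode(signed.Bytes())
	_, err = openpgp.CheckDetachedSignature(
		openpgp.EntityList{entity},
		bytes.NewReader(block.Bytes),
		block.ArmoredSignature.Body,
	)
	fmt.Println("signature valid:", err == nil)
}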
+// +// Clearsigned messages are cryptographically signed, but the contents of the +// message are kept in plaintext so that it can be read without special tools. +package clearsign // import "golang.org/x/crypto/openpgp/clearsign" + +import ( + "bufio" + "bytes" + "crypto" + "hash" + "io" + "net/textproto" + "strconv" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// A Block represents a clearsigned message. A signature on a Block can +// be checked by passing Bytes into openpgp.CheckDetachedSignature. +type Block struct { + Headers textproto.MIMEHeader // Optional message headers + Plaintext []byte // The original message text + Bytes []byte // The signed message + ArmoredSignature *armor.Block // The signature block +} + +// start is the marker which denotes the beginning of a clearsigned message. +var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----") + +// dashEscape is prefixed to any lines that begin with a hyphen so that they +// can't be confused with endText. +var dashEscape = []byte("- ") + +// endText is a marker which denotes the end of the message and the start of +// an armored signature. +var endText = []byte("-----BEGIN PGP SIGNATURE-----") + +// end is a marker which denotes the end of the armored signature. +var end = []byte("\n-----END PGP SIGNATURE-----") + +var crlf = []byte("\r\n") +var lf = byte('\n') + +// getLine returns the first \r\n or \n delineated line from the given byte +// array. The line does not include the \r\n or \n. The remainder of the byte +// array (also not including the new line bytes) is also returned and this will +// always be smaller than the original argument. +func getLine(data []byte) (line, rest []byte) { + i := bytes.Index(data, []byte{'\n'}) + var j int + if i < 0 { + i = len(data) + j = i + } else { + j = i + 1 + if i > 0 && data[i-1] == '\r' { + i-- + } + } + return data[0:i], data[j:] +} + +// Decode finds the first clearsigned message in data and returns it, as well +// as the suffix of data which remains after the message. +func Decode(data []byte) (b *Block, rest []byte) { + // start begins with a newline. However, at the very beginning of + // the byte array, we'll accept the start string without it. + rest = data + if bytes.HasPrefix(data, start[1:]) { + rest = rest[len(start)-1:] + } else if i := bytes.Index(data, start); i >= 0 { + rest = rest[i+len(start):] + } else { + return nil, data + } + + // Consume the start line. + _, rest = getLine(rest) + + var line []byte + b = &Block{ + Headers: make(textproto.MIMEHeader), + } + + // Next come a series of header lines. + for { + // This loop terminates because getLine's second result is + // always smaller than its argument. + if len(rest) == 0 { + return nil, data + } + // An empty line marks the end of the headers. + if line, rest = getLine(rest); len(line) == 0 { + break + } + + i := bytes.Index(line, []byte{':'}) + if i == -1 { + return nil, data + } + + key, val := line[0:i], line[i+1:] + key = bytes.TrimSpace(key) + val = bytes.TrimSpace(val) + b.Headers.Add(string(key), string(val)) + } + + firstLine := true + for { + start := rest + + line, rest = getLine(rest) + if len(line) == 0 && len(rest) == 0 { + // No armored data was found, so this isn't a complete message. + return nil, data + } + if bytes.Equal(line, endText) { + // Back up to the start of the line because armor expects to see the + // header line. 
+ rest = start + break + } + + // The final CRLF isn't included in the hash so we don't write it until + // we've seen the next line. + if firstLine { + firstLine = false + } else { + b.Bytes = append(b.Bytes, crlf...) + } + + if bytes.HasPrefix(line, dashEscape) { + line = line[2:] + } + line = bytes.TrimRight(line, " \t") + b.Bytes = append(b.Bytes, line...) + + b.Plaintext = append(b.Plaintext, line...) + b.Plaintext = append(b.Plaintext, lf) + } + + // We want to find the extent of the armored data (including any newlines at + // the end). + i := bytes.Index(rest, end) + if i == -1 { + return nil, data + } + i += len(end) + for i < len(rest) && (rest[i] == '\r' || rest[i] == '\n') { + i++ + } + armored := rest[:i] + rest = rest[i:] + + var err error + b.ArmoredSignature, err = armor.Decode(bytes.NewBuffer(armored)) + if err != nil { + return nil, data + } + + return b, rest +} + +// A dashEscaper is an io.WriteCloser which processes the body of a clear-signed +// message. The clear-signed message is written to buffered and a hash, suitable +// for signing, is maintained in h. +// +// When closed, an armored signature is created and written to complete the +// message. +type dashEscaper struct { + buffered *bufio.Writer + h hash.Hash + hashType crypto.Hash + + atBeginningOfLine bool + isFirstLine bool + + whitespace []byte + byteBuf []byte // a one byte buffer to save allocations + + privateKey *packet.PrivateKey + config *packet.Config +} + +func (d *dashEscaper) Write(data []byte) (n int, err error) { + for _, b := range data { + d.byteBuf[0] = b + + if d.atBeginningOfLine { + // The final CRLF isn't included in the hash so we have to wait + // until this point (the start of the next line) before writing it. + if !d.isFirstLine { + d.h.Write(crlf) + } + d.isFirstLine = false + } + + // Any whitespace at the end of the line has to be removed so we + // buffer it until we find out whether there's more on this line. + if b == ' ' || b == '\t' || b == '\r' { + d.whitespace = append(d.whitespace, b) + d.atBeginningOfLine = false + continue + } + + if d.atBeginningOfLine { + // At the beginning of a line, hyphens have to be escaped. + if b == '-' { + // The signature isn't calculated over the dash-escaped text so + // the escape is only written to buffered. + if _, err = d.buffered.Write(dashEscape); err != nil { + return + } + d.h.Write(d.byteBuf) + d.atBeginningOfLine = false + } else if b == '\n' { + // Nothing to do because we delay writing CRLF to the hash. + } else { + d.h.Write(d.byteBuf) + d.atBeginningOfLine = false + } + if err = d.buffered.WriteByte(b); err != nil { + return + } + } else { + if b == '\n' { + // We got a raw \n. Drop any trailing whitespace and write a + // CRLF. + d.whitespace = d.whitespace[:0] + // We delay writing CRLF to the hash until the start of the + // next line. + if err = d.buffered.WriteByte(b); err != nil { + return + } + d.atBeginningOfLine = true + } else { + // Any buffered whitespace wasn't at the end of the line so + // we need to write it out. 
+ if len(d.whitespace) > 0 { + d.h.Write(d.whitespace) + if _, err = d.buffered.Write(d.whitespace); err != nil { + return + } + d.whitespace = d.whitespace[:0] + } + d.h.Write(d.byteBuf) + if err = d.buffered.WriteByte(b); err != nil { + return + } + } + } + } + + n = len(data) + return +} + +func (d *dashEscaper) Close() (err error) { + if !d.atBeginningOfLine { + if err = d.buffered.WriteByte(lf); err != nil { + return + } + } + sig := new(packet.Signature) + sig.SigType = packet.SigTypeText + sig.PubKeyAlgo = d.privateKey.PubKeyAlgo + sig.Hash = d.hashType + sig.CreationTime = d.config.Now() + sig.IssuerKeyId = &d.privateKey.KeyId + + if err = sig.Sign(d.h, d.privateKey, d.config); err != nil { + return + } + + out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil) + if err != nil { + return + } + + if err = sig.Serialize(out); err != nil { + return + } + if err = out.Close(); err != nil { + return + } + if err = d.buffered.Flush(); err != nil { + return + } + return +} + +// Encode returns a WriteCloser which will clear-sign a message with privateKey +// and write it to w. If config is nil, sensible defaults are used. +func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { + if privateKey.Encrypted { + return nil, errors.InvalidArgumentError("signing key is encrypted") + } + + hashType := config.Hash() + name := nameOfHash(hashType) + if len(name) == 0 { + return nil, errors.UnsupportedError("unknown hash type: " + strconv.Itoa(int(hashType))) + } + + if !hashType.Available() { + return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType))) + } + h := hashType.New() + + buffered := bufio.NewWriter(w) + // start has a \n at the beginning that we don't want here. + if _, err = buffered.Write(start[1:]); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if _, err = buffered.WriteString("Hash: "); err != nil { + return + } + if _, err = buffered.WriteString(name); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + + plaintext = &dashEscaper{ + buffered: buffered, + h: h, + hashType: hashType, + + atBeginningOfLine: true, + isFirstLine: true, + + byteBuf: make([]byte, 1), + + privateKey: privateKey, + config: config, + } + + return +} + +// nameOfHash returns the OpenPGP name for the given hash, or the empty string +// if the name isn't known. See RFC 4880, section 9.4. +func nameOfHash(h crypto.Hash) string { + switch h { + case crypto.MD5: + return "MD5" + case crypto.SHA1: + return "SHA1" + case crypto.RIPEMD160: + return "RIPEMD160" + case crypto.SHA224: + return "SHA224" + case crypto.SHA256: + return "SHA256" + case crypto.SHA384: + return "SHA384" + case crypto.SHA512: + return "SHA512" + } + return "" +} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 00000000..73f4fe37 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,122 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. 
IT-31, +// n. 4, 1985, pp. 469-472. +// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +package elgamal // import "golang.org/x/crypto/openpgp/elgamal" + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. +func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { + pLen := (pub.P.BitLen() + 7) / 8 + if len(msg) > pLen-11 { + err = errors.New("elgamal: message too long") + return + } + + // EM = 0x02 || PS || 0x00 || M + em := make([]byte, pLen-1) + em[0] = 2 + ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] + err = nonZeroRandomBytes(ps, random) + if err != nil { + return + } + em[len(em)-len(msg)-1] = 0 + copy(mm, msg) + + m := new(big.Int).SetBytes(em) + + k, err := rand.Int(random, pub.P) + if err != nil { + return + } + + c1 = new(big.Int).Exp(pub.G, k, pub.P) + s := new(big.Int).Exp(pub.Y, k, pub.P) + c2 = s.Mul(s, m) + c2.Mod(c2, pub.P) + + return +} + +// Decrypt takes two integers, resulting from an ElGamal encryption, and +// returns the plaintext of the message. An error can result only if the +// ciphertext is invalid. Users should keep in mind that this is a padding +// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can +// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// Bleichenbacher, Advances in Cryptology (Crypto '98), +func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { + s := new(big.Int).Exp(c1, priv.X, priv.P) + s.ModInverse(s, priv.P) + s.Mul(s, c2) + s.Mod(s, priv.P) + em := s.Bytes() + + firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) + + // The remainder of the plaintext must be a string of non-zero random + // octets, followed by a 0, followed by the message. + // lookingForIndex: 1 iff we are still looking for the zero. + // index: the offset of the first zero byte. + var lookingForIndex, index int + lookingForIndex = 1 + + for i := 1; i < len(em); i++ { + equals0 := subtle.ConstantTimeByteEq(em[i], 0) + index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) + } + + if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { + return nil, errors.New("elgamal: decryption error") + } + return em[index+1:], nil +} + +// nonZeroRandomBytes fills the given slice with non-zero random octets. +func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { + _, err = io.ReadFull(rand, s) + if err != nil { + return + } + + for i := 0; i < len(s); i++ { + for s[i] == 0 { + _, err = io.ReadFull(rand, s[i:i+1]) + if err != nil { + return + } + } + } + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go new file mode 100644 index 00000000..eb0550b2 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go @@ -0,0 +1,72 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors contains common error types for the OpenPGP packages. +package errors // import "golang.org/x/crypto/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. +type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go new file mode 100644 index 00000000..fd582a89 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/keys.go @@ -0,0 +1,641 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto/rsa" + "io" + "time" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Signatures []*packet.Signature +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. 
+type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. +type Key struct { + Entity *Entity + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + SelfSignature *packet.Signature +} + +// A KeyRing provides access to public and private keys. +type KeyRing interface { + // KeysById returns the set of keys that have the given key id. + KeysById(id uint64) []Key + // KeysByIdAndUsage returns the set of keys with the given id + // that also meet the key usage given by requiredUsage. + // The requiredUsage is expressed as the bitwise-OR of + // packet.KeyFlag* values. + KeysByIdUsage(id uint64, requiredUsage byte) []Key + // DecryptionKeys returns all private keys that are valid for + // decryption. + DecryptionKeys() []Key +} + +// primaryIdentity returns the Identity marked as primary or the first identity +// if none are so marked. +func (e *Entity) primaryIdentity() *Identity { + var firstIdentity *Identity + for _, ident := range e.Identities { + if firstIdentity == nil { + firstIdentity = ident + } + if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + return ident + } + } + return firstIdentity +} + +// encryptionKey returns the best candidate Key for encrypting a message to the +// given Entity. +func (e *Entity) encryptionKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + // Iterate the keys to find the newest key + var maxTime time.Time + for i, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagEncryptCommunications && + subkey.PublicKey.PubKeyAlgo.CanEncrypt() && + !subkey.Sig.KeyExpired(now) && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { + candidateSubkey = i + maxTime = subkey.Sig.CreationTime + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + } + + // If we don't have any candidate subkeys for encryption and + // the primary key doesn't have any usage metadata then we + // assume that the primary key is ok. Or, if the primary key is + // marked as ok to encrypt to, then we can obviously use it. + i := e.primaryIdentity() + if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && + e.PrimaryKey.PubKeyAlgo.CanEncrypt() && + !i.SelfSignature.KeyExpired(now) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + } + + // This Entity appears to be signing only. + return Key{}, false +} + +// signingKey return the best candidate Key for signing a message with this +// Entity. +func (e *Entity) signingKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + for i, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagSign && + subkey.PublicKey.PubKeyAlgo.CanSign() && + !subkey.Sig.KeyExpired(now) { + candidateSubkey = i + break + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + } + + // If we have no candidate subkey then we assume that it's ok to sign + // with the primary key. + i := e.primaryIdentity() + if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && + !i.SelfSignature.KeyExpired(now) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + } + + return Key{}, false +} + +// An EntityList contains one or more Entities. 
+type EntityList []*Entity + +// KeysById returns the set of keys that have the given key id. +func (el EntityList) KeysById(id uint64) (keys []Key) { + for _, e := range el { + if e.PrimaryKey.KeyId == id { + var selfSig *packet.Signature + for _, ident := range e.Identities { + if selfSig == nil { + selfSig = ident.SelfSignature + } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + selfSig = ident.SelfSignature + break + } + } + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) + } + + for _, subKey := range e.Subkeys { + if subKey.PublicKey.KeyId == id { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// KeysByIdAndUsage returns the set of keys with the given id that also meet +// the key usage given by requiredUsage. The requiredUsage is expressed as +// the bitwise-OR of packet.KeyFlag* values. +func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { + for _, key := range el.KeysById(id) { + if len(key.Entity.Revocations) > 0 { + continue + } + + if key.SelfSignature.RevocationReason != nil { + continue + } + + if key.SelfSignature.FlagsValid && requiredUsage != 0 { + var usage byte + if key.SelfSignature.FlagCertify { + usage |= packet.KeyFlagCertify + } + if key.SelfSignature.FlagSign { + usage |= packet.KeyFlagSign + } + if key.SelfSignature.FlagEncryptCommunications { + usage |= packet.KeyFlagEncryptCommunications + } + if key.SelfSignature.FlagEncryptStorage { + usage |= packet.KeyFlagEncryptStorage + } + if usage&requiredUsage != requiredUsage { + continue + } + } + + keys = append(keys, key) + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption. +func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. 
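A short sketch combining the keyring helpers above: ReadArmoredKeyRing parses an armored keyring and KeysByIdUsage filters the result by the packet.KeyFlag* bits. The file name is hypothetical:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// "pubring.asc" is an assumed armored keyring file on disk.
	f, err := os.Open("pubring.asc")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	keyring, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		panic(err)
	}

	for _, entity := range keyring {
		fmt.Printf("primary key %X\n", entity.PrimaryKey.KeyId)
		// List keys under this id that are usable for signing.
		for _, k := range keyring.KeysByIdUsage(entity.PrimaryKey.KeyId, packet.KeyFlagSign) {
			fmt.Printf("  signing-capable key %X\n", k.PublicKey.KeyId)
		}
	}
}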
+func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var current *Identity + var revocations []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + current = new(Identity) + current.Name = pkt.Id + current.UserId = pkt + e.Identities[pkt.Id] = current + + for { + p, err = packets.Next() + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } else if err != nil { + return nil, err + } + + sig, ok := p.(*packet.Signature) + if !ok { + return nil, errors.StructuralError("user ID packet not followed by self-signature") + } + + if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + current.SelfSignature = sig + break + } + current.Signatures = append(current.Signatures, sig) + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + // TODO: RFC4880 5.2.1 permits signatures + // directly on keys (eg. to bind additional + // revocation keys). 
+ } else if current == nil { + return nil, errors.StructuralError("signature packet found before user id packet") + } else { + current.Signatures = append(current.Signatures, pkt) + } + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + p, err := packets.Next() + if err == io.EOF { + return io.ErrUnexpectedEOF + } + if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + var ok bool + subKey.Sig, ok = p.(*packet.Signature) + if !ok { + return errors.StructuralError("subkey packet not followed by signature") + } + if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig) + if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + e.Subkeys = append(e.Subkeys, subKey) + return nil +} + +const defaultRSAKeyBits = 2048 + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. 
+func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + currentTime := config.Now() + + bits := defaultRSAKeyBits + if config != nil && config.RSABits != 0 { + bits = config.RSABits + } + + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return nil, errors.InvalidArgumentError("user id field contained invalid characters") + } + signingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + + e := &Entity{ + PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv), + Identities: make(map[string]*Identity), + } + isPrimaryId := true + e.Identities[uid.Id] = &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: &packet.Signature{ + CreationTime: currentTime, + SigType: packet.SigTypePositiveCert, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + IsPrimaryId: &isPrimaryId, + FlagsValid: true, + FlagSign: true, + FlagCertify: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + + // If the user passes in a DefaultHash via packet.Config, + // set the PreferredHash for the SelfSignature. + if config != nil && config.DefaultHash != 0 { + e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} + } + + // Likewise for DefaultCipher. + if config != nil && config.DefaultCipher != 0 { + e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} + } + + e.Subkeys = make([]Subkey, 1) + e.Subkeys[0] = Subkey{ + PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv), + Sig: &packet.Signature{ + CreationTime: currentTime, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + e.Subkeys[0].PublicKey.IsSubkey = true + e.Subkeys[0].PrivateKey.IsSubkey = true + + return e, nil +} + +// SerializePrivate serializes an Entity, including private key material, to +// the given Writer. For now, it must only be used on an Entity returned from +// NewEntity. +// If config is nil, sensible defaults will be used. +func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w. (No private +// key material will be output). 
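A minimal sketch of the NewEntity/SerializePrivate flow above, assuming the default (nil) configuration; the name and address are placeholders. As the code above shows, SerializePrivate is what signs the identities and subkeys, so it runs before Serialize here:

package main

import (
	"bytes"
	"log"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// nil *packet.Config selects the defaults (2048-bit RSA, SHA-256).
	e, err := openpgp.NewEntity("Alice Example", "", "alice@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}

	var priv, pub bytes.Buffer
	if err := e.SerializePrivate(&priv, nil); err != nil {
		log.Fatal(err)
	}
	if err := e.Serialize(&pub); err != nil {
		log.Fatal(err)
	}
	log.Printf("private: %d bytes, public: %d bytes", priv.Len(), pub.Len())
}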
+func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. +func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing Entity must have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := &packet.Signature{ + SigType: packet.SigTypeGenericCert, + PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.PrivateKey.KeyId, + } + if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go new file mode 100644 index 00000000..e8f0b5ca --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "compress/bzip2" + "compress/flate" + "compress/zlib" + "golang.org/x/crypto/openpgp/errors" + "io" + "strconv" +) + +// Compressed represents a compressed OpenPGP packet. The decompressed contents +// will contain more OpenPGP packets. See RFC 4880, section 5.6. +type Compressed struct { + Body io.Reader +} + +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression +) + +// CompressionConfig contains compressor configuration settings. +type CompressionConfig struct { + // Level is the compression level to use. It must be set to + // between -1 and 9, with -1 causing the compressor to use the + // default compression level, 0 causing the compressor to use + // no compression and 1 to 9 representing increasing (better, + // slower) compression levels. If Level is less than -1 or + // more then 9, a non-nil error will be returned during + // encryption. See the constants above for convenient common + // settings for Level. 
+ Level int +} + +func (c *Compressed) parse(r io.Reader) error { + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + + switch buf[0] { + case 1: + c.Body = flate.NewReader(r) + case 2: + c.Body, err = zlib.NewReader(r) + case 3: + c.Body = bzip2.NewReader(r) + default: + err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) + } + + return err +} + +// compressedWriterCloser represents the serialized compression stream +// header and the compressor. Its Close() method ensures that both the +// compressor and serialized stream header are closed. Its Write() +// method writes to the compressor. +type compressedWriteCloser struct { + sh io.Closer // Stream Header + c io.WriteCloser // Compressor +} + +func (cwc compressedWriteCloser) Write(p []byte) (int, error) { + return cwc.c.Write(p) +} + +func (cwc compressedWriteCloser) Close() (err error) { + err = cwc.c.Close() + if err != nil { + return err + } + + return cwc.sh.Close() +} + +// SerializeCompressed serializes a compressed data packet to w and +// returns a WriteCloser to which the literal data packets themselves +// can be written and which MUST be closed on completion. If cc is +// nil, sensible defaults will be used to configure the compression +// algorithm. +func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { + compressed, err := serializeStreamHeader(w, packetTypeCompressed) + if err != nil { + return + } + + _, err = compressed.Write([]byte{uint8(algo)}) + if err != nil { + return + } + + level := DefaultCompression + if cc != nil { + level = cc.Level + } + + var compressor io.WriteCloser + switch algo { + case CompressionZIP: + compressor, err = flate.NewWriter(compressed, level) + case CompressionZLIB: + compressor, err = zlib.NewWriterLevel(compressed, level) + default: + s := strconv.Itoa(int(algo)) + err = errors.UnsupportedError("Unsupported compression algorithm: " + s) + } + if err != nil { + return + } + + literaldata = compressedWriteCloser{compressed, compressor} + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go new file mode 100644 index 00000000..c76eecc9 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/config.go @@ -0,0 +1,91 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/rand" + "io" + "time" +) + +// Config collects a number of parameters along with sensible defaults. +// A nil *Config is valid and results in all default values. +type Config struct { + // Rand provides the source of entropy. + // If nil, the crypto/rand Reader is used. + Rand io.Reader + // DefaultHash is the default hash function to be used. + // If zero, SHA-256 is used. + DefaultHash crypto.Hash + // DefaultCipher is the cipher to be used. + // If zero, AES-128 is used. + DefaultCipher CipherFunction + // Time returns the current time as the number of seconds since the + // epoch. If Time is nil, time.Now is used. + Time func() time.Time + // DefaultCompressionAlgo is the compression algorithm to be + // applied to the plaintext before encryption. If zero, no + // compression is done. + DefaultCompressionAlgo CompressionAlgo + // CompressionConfig configures the compression settings. 
+ CompressionConfig *CompressionConfig + // S2KCount is only used for symmetric encryption. It + // determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 1024 and 65011712, inclusive. If Config + // is nil or S2KCount is 0, the value 65536 used. Not all + // values in the above range can be represented. S2KCount will + // be rounded up to the next representable value if it cannot + // be encoded exactly. When set, it is strongly encrouraged to + // use a value that is at least 65536. See RFC 4880 Section + // 3.7.1.3. + S2KCount int + // RSABits is the number of bits in new RSA keys made with NewEntity. + // If zero, then 2048 bit keys are created. + RSABits int +} + +func (c *Config) Random() io.Reader { + if c == nil || c.Rand == nil { + return rand.Reader + } + return c.Rand +} + +func (c *Config) Hash() crypto.Hash { + if c == nil || uint(c.DefaultHash) == 0 { + return crypto.SHA256 + } + return c.DefaultHash +} + +func (c *Config) Cipher() CipherFunction { + if c == nil || uint8(c.DefaultCipher) == 0 { + return CipherAES128 + } + return c.DefaultCipher +} + +func (c *Config) Now() time.Time { + if c == nil || c.Time == nil { + return time.Now() + } + return c.Time() +} + +func (c *Config) Compression() CompressionAlgo { + if c == nil { + return CompressionNone + } + return c.DefaultCompressionAlgo +} + +func (c *Config) PasswordHashIterations() int { + if c == nil || c.S2KCount == 0 { + return 0 + } + return c.S2KCount +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go new file mode 100644 index 00000000..266840d0 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go @@ -0,0 +1,199 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/rsa" + "encoding/binary" + "io" + "math/big" + "strconv" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" +) + +const encryptedKeyVersion = 3 + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. +type EncryptedKey struct { + KeyId uint64 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt + Key []byte // only valid after a successful Decrypt + + encryptedMPI1, encryptedMPI2 parsedMPI +} + +func (e *EncryptedKey) parse(r io.Reader) (err error) { + var buf [10]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != encryptedKeyVersion { + return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + e.KeyId = binary.BigEndian.Uint64(buf[1:9]) + e.Algo = PublicKeyAlgorithm(buf[9]) + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) + case PubKeyAlgoElGamal: + e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) + if err != nil { + return + } + e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) + } + _, err = consumeAll(r) + return +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +// Decrypt decrypts an encrypted session key with the given private key. The +// private key must have been decrypted first. 
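The Config accessors above are written so that a nil *Config is usable; a small sketch of that pattern:

package main

import (
	"crypto"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	var cfg *packet.Config // nil is a valid configuration
	fmt.Println(cfg.Hash() == crypto.SHA256)          // true: default hash
	fmt.Println(cfg.Cipher() == packet.CipherAES128)  // true: default cipher

	custom := &packet.Config{DefaultHash: crypto.SHA512, RSABits: 4096}
	fmt.Println(custom.Hash() == crypto.SHA512) // true: override respected
}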
+// If config is nil, sensible defaults will be used. +func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { + var err error + var b []byte + + // TODO(agl): use session key decryption routines here to avoid + // padding oracle attacks. + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes) + case PubKeyAlgoElGamal: + c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) + c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) + b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) + default: + err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + + if err != nil { + return err + } + + e.CipherFunc = CipherFunction(b[0]) + e.Key = b[1 : len(b)-2] + expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) + checksum := checksumKeyMaterial(e.Key) + if checksum != expectedChecksum { + return errors.StructuralError("EncryptedKey checksum incorrect") + } + + return nil +} + +// Serialize writes the encrypted key packet, e, to w. +func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + case PubKeyAlgoElGamal: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + writeMPIs(w, e.encryptedMPI1) + case PubKeyAlgoElGamal: + writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) + default: + panic("internal error") + } + + return nil +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If config is nil, sensible defaults will be used. 
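A sketch of how a caller might use SerializeEncryptedKey, documented above, to emit a session-key packet; the recipient key pub is assumed to come from a parsed keyring, and the symmetrically encrypted data packet that would follow is omitted:

package pgpexample

import (
	"crypto/rand"
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

// writeSessionKey generates a fresh AES-128 session key, writes the
// public-key encrypted session-key packet to w, and returns the key so
// the caller can encrypt the payload with it.
func writeSessionKey(w io.Writer, pub *packet.PublicKey) ([]byte, error) {
	cipherFunc := packet.CipherAES128
	sessionKey := make([]byte, cipherFunc.KeySize())
	if _, err := rand.Read(sessionKey); err != nil {
		return nil, err
	}
	// nil config: crypto/rand supplies the randomness for the encryption.
	if err := packet.SerializeEncryptedKey(w, pub, cipherFunc, sessionKey, nil); err != nil {
		return nil, err
	}
	return sessionKey, nil
}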
+func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + return writeMPI(w, 8*uint16(len(cipherText)), cipherText) +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + err = writeBig(w, c1) + if err != nil { + return err + } + return writeBig(w, c2) +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/golang.org/x/crypto/openpgp/packet/literal.go new file mode 100644 index 00000000..1a9ec6e5 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/literal.go @@ -0,0 +1,89 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "encoding/binary" + "io" +) + +// LiteralData represents an encrypted file. See RFC 4880, section 5.9. +type LiteralData struct { + IsBinary bool + FileName string + Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. + Body io.Reader +} + +// ForEyesOnly returns whether the contents of the LiteralData have been marked +// as especially sensitive. 
+func (l *LiteralData) ForEyesOnly() bool { + return l.FileName == "_CONSOLE" +} + +func (l *LiteralData) parse(r io.Reader) (err error) { + var buf [256]byte + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + + l.IsBinary = buf[0] == 'b' + fileNameLen := int(buf[1]) + + _, err = readFull(r, buf[:fileNameLen]) + if err != nil { + return + } + + l.FileName = string(buf[:fileNameLen]) + + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + + l.Time = binary.BigEndian.Uint32(buf[:4]) + l.Body = r + return +} + +// SerializeLiteral serializes a literal data packet to w and returns a +// WriteCloser to which the data itself can be written and which MUST be closed +// on completion. The fileName is truncated to 255 bytes. +func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { + var buf [4]byte + buf[0] = 't' + if isBinary { + buf[0] = 'b' + } + if len(fileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + + inner, err := serializeStreamHeader(w, packetTypeLiteralData) + if err != nil { + return + } + + _, err = inner.Write(buf[:2]) + if err != nil { + return + } + _, err = inner.Write([]byte(fileName)) + if err != nil { + return + } + binary.BigEndian.PutUint32(buf[:], time) + _, err = inner.Write(buf[:]) + if err != nil { + return + } + + plaintext = inner + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go new file mode 100644 index 00000000..ce2a33a5 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go @@ -0,0 +1,143 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 + +package packet + +import ( + "crypto/cipher" +) + +type ocfbEncrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// An OCFBResyncOption determines if the "resynchronization step" of OCFB is +// performed. +type OCFBResyncOption bool + +const ( + OCFBResync OCFBResyncOption = true + OCFBNoResync OCFBResyncOption = false +) + +// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block, and an initial amount of +// ciphertext. randData must be random bytes and be the same length as the +// cipher.Block's block size. Resync determines if the "resynchronization step" +// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on +// this point. 
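A round-trip sketch for the OCFB constructors described above; the key and plaintext are made up and errors from rand.Read are ignored for brevity:

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	key := make([]byte, 16)
	rand.Read(key)
	block, err := aes.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}

	seed := make([]byte, block.BlockSize()) // random prefix material
	rand.Read(seed)

	enc, prefix := packet.NewOCFBEncrypter(block, seed, packet.OCFBResync)
	plaintext := []byte("hello, OCFB")
	ct := make([]byte, len(plaintext))
	enc.XORKeyStream(ct, plaintext)

	// The prefix travels ahead of the ciphertext; the decrypter consumes
	// it and returns nil when the key is obviously wrong.
	dec := packet.NewOCFBDecrypter(block, prefix, packet.OCFBResync)
	if dec == nil {
		log.Fatal("prefix check failed")
	}
	pt := make([]byte, len(ct))
	dec.XORKeyStream(pt, ct)
	fmt.Println(bytes.Equal(pt, plaintext)) // true
}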
+func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. If an incorrect key is detected then nil is returned. On +// successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if prefixCopy[blockSize-2] != prefixCopy[blockSize] || + prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { + return nil + } + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 00000000..17135033 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" + "io" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. 
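A small sketch that builds and serializes the one-pass signature packet documented above; the key id is a placeholder:

package main

import (
	"bytes"
	"crypto"
	"log"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	ops := &packet.OnePassSignature{
		SigType:    packet.SigTypeBinary,
		Hash:       crypto.SHA256,
		PubKeyAlgo: packet.PubKeyAlgoRSA,
		KeyId:      0x1122334455667788, // placeholder issuer key id
		IsLast:     true,
	}
	var buf bytes.Buffer
	if err := ops.Serialize(&buf); err != nil {
		log.Fatal(err)
	}
	log.Printf("one-pass signature packet: % X", buf.Bytes())
}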
+type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go new file mode 100644 index 00000000..456d807f --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + "io/ioutil" + + "golang.org/x/crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = ioutil.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. +type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. 
+func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go new file mode 100644 index 00000000..3eded93f --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -0,0 +1,537 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "golang.org/x/crypto/openpgp/packet" + +import ( + "bufio" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "golang.org/x/crypto/cast5" + "golang.org/x/crypto/openpgp/errors" + "io" + "math/big" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. +func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
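The OpaqueReader above can walk a binary OpenPGP stream without interpreting it, which is handy for inspecting unknown packet tags; a sketch (the file name is hypothetical):

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	f, err := os.Open("message.pgp") // hypothetical binary OpenPGP stream
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	or := packet.NewOpaqueReader(f)
	for {
		op, err := or.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("tag %d, %d bytes\n", op.Tag, len(op.Contents))
	}
}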
+func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + for len(p) > 0 { + for power := uint(14); power < 32; power-- { + l := 1 << power + if len(p) >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(p[:l]) + n += m + if err != nil { + return + } + p = p[l:] + break + } + } + } + return +} + +func (w *partialLengthWriter) Close() error { + w.lengthByte[0] = 0 + _, err := w.w.Write(w.lengthByte[:]) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
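A worked check of the two-octet new-format length encoding handled by readLength above (and produced by serializeHeader further below); the value 1000 is arbitrary:

package main

import "fmt"

func main() {
	length := 1000 // body length to encode (192 <= length < 8384)

	// Encode, as serializeHeader does for this range.
	b1 := byte(192 + ((length - 192) >> 8))
	b2 := byte(length - 192)
	fmt.Printf("octets: %#02x %#02x\n", b1, b2) // 0xc3 0x28

	// Decode, as readLength does.
	decoded := int(b1-192)<<8 + int(b2) + 192
	fmt.Println(decoded == length) // true
}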
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + var buf [6]byte + var n int + + buf[0] = 0x80 | 0x40 | byte(ptype) + if length < 192 { + buf[1] = byte(length) + n = 2 + } else if length < 8384 { + length -= 192 + buf[1] = 192 + byte(length>>8) + buf[2] = byte(length) + n = 3 + } else { + buf[1] = 255 + buf[2] = byte(length >> 24) + buf[3] = byte(length >> 16) + buf[4] = byte(length >> 8) + buf[5] = byte(length) + n = 6 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedMDC packetType = 18 +) + +// peekVersion detects the version of a public key packet about to +// be read. A bufio.Reader at the original position of the io.Reader +// is returned. 
+func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { + bufr = bufio.NewReader(r) + var verBuf []byte + if verBuf, err = bufr.Peek(1); err != nil { + return + } + ver = verBuf[0] + return +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + var version byte + // Detect signature version + if contents, version, err = peekVersion(contents); err != nil { + return + } + if version < 4 { + p = new(SignatureV3) + } else { + p = new(Signature) + } + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + var version byte + if contents, version, err = peekVersion(contents); err != nil { + return + } + isSubkey := tag == packetTypePublicSubkey + if version < 4 { + p = &PublicKeyV3{IsSubkey: isSubkey} + } else { + p = &PublicKey{IsSubkey: isSubkey} + } + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0 + SigTypeText = 1 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. + PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. +func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. 
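A sketch of the usual dispatch loop around Read, defined above; only a few of the possible packet types are shown:

package pgpexample

import (
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp/errors"
	"golang.org/x/crypto/openpgp/packet"
)

func dumpPackets(r io.Reader) error {
	for {
		p, err := packet.Read(r)
		if err == io.EOF {
			return nil
		}
		if _, ok := err.(errors.UnknownPacketTypeError); ok {
			continue // unknown tag: its body has already been consumed
		}
		if err != nil {
			return err
		}
		switch pkt := p.(type) {
		case *packet.PublicKey:
			fmt.Printf("public key %X\n", pkt.KeyId)
		case *packet.Signature:
			fmt.Printf("signature, type %d\n", pkt.SigType)
		case *packet.LiteralData:
			fmt.Printf("literal data %q\n", pkt.FileName)
		default:
			fmt.Printf("packet %T\n", pkt)
		}
	}
}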
+func (pka PublicKeyAlgorithm) CanSign() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: + return true + } + return false +} + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction uint8 + +const ( + Cipher3DES CipherFunction = 2 + CipherCAST5 CipherFunction = 3 + CipherAES128 CipherFunction = 7 + CipherAES192 CipherFunction = 8 + CipherAES256 CipherFunction = 9 +) + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case Cipher3DES: + return 24 + case CipherCAST5: + return cast5.KeySize + case CipherAES128: + return 16 + case CipherAES192: + return 24 + case CipherAES256: + return 32 + } + return 0 +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + switch cipher { + case Cipher3DES: + return des.BlockSize + case CipherCAST5: + return 8 + case CipherAES128, CipherAES192, CipherAES256: + return 16 + } + return 0 +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + switch cipher { + case Cipher3DES: + block, _ = des.NewTripleDESCipher(key) + case CipherCAST5: + block, _ = cast5.NewCipher(key) + case CipherAES128, CipherAES192, CipherAES256: + block, _ = aes.NewCipher(key) + } + return +} + +// readMPI reads a big integer from r. The bit length returned is the bit +// length that was specified in r. This is preserved so that the integer can be +// reserialized exactly. +func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { + var buf [2]byte + _, err = readFull(r, buf[0:]) + if err != nil { + return + } + bitLength = uint16(buf[0])<<8 | uint16(buf[1]) + numBytes := (int(bitLength) + 7) / 8 + mpi = make([]byte, numBytes) + _, err = readFull(r, mpi) + return +} + +// mpiLength returns the length of the given *big.Int when serialized as an +// MPI. +func mpiLength(n *big.Int) (mpiLengthInBytes int) { + mpiLengthInBytes = 2 /* MPI length */ + mpiLengthInBytes += (n.BitLen() + 7) / 8 + return +} + +// writeMPI serializes a big integer to w. +func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { + _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) + if err == nil { + _, err = w.Write(mpiBytes) + } + return +} + +// writeBig serializes a *big.Int to w. +func writeBig(w io.Writer, i *big.Int) error { + return writeMPI(w, uint16(i.BitLen()), i.Bytes()) +} + +// CompressionAlgo Represents the different compression algorithms +// supported by OpenPGP (except for BZIP2, which is not currently +// supported). See Section 9.3 of RFC 4880. +type CompressionAlgo uint8 + +const ( + CompressionNone CompressionAlgo = 0 + CompressionZIP CompressionAlgo = 1 + CompressionZLIB CompressionAlgo = 2 +) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go new file mode 100644 index 00000000..34734cc6 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go @@ -0,0 +1,380 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "bytes" + "crypto" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "crypto/sha1" + "io" + "io/ioutil" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer. + sha1Checksum bool + iv []byte +} + +func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +// NewSignerPrivateKey creates a sign-only PrivateKey from a crypto.Signer that +// implements RSA or ECDSA. +func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey { + pk := new(PrivateKey) + switch pubkey := signer.Public().(type) { + case rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey) + pk.PubKeyAlgo = PubKeyAlgoRSASignOnly + case ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey) + default: + panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") + } + pk.PrivateKey = signer + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + + s2kType := buf[0] + + switch s2kType { + case 0: + pk.s2k = nil + pk.Encrypted = false + case 254, 255: + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.Encrypted = true + pk.s2k, err = s2k.Parse(r) + if err != nil { + return + } + if s2kType == 254 { + pk.sha1Checksum = true + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + pk.encryptedData, err = ioutil.ReadAll(r) + if err != nil { + return + } + + if !pk.Encrypted { + return pk.parsePrivateKey(pk.encryptedData) + } + + return +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + // TODO(agl): support encrypted private keys + buf := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(buf) 
+ if err != nil { + return + } + buf.WriteByte(0 /* no encryption */) + + privateKeyBuf := bytes.NewBuffer(nil) + + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(privateKeyBuf, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(privateKeyBuf, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(privateKeyBuf, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(privateKeyBuf, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + if err != nil { + return + } + + ptype := packetTypePrivateKey + contents := buf.Bytes() + privateKeyBytes := privateKeyBuf.Bytes() + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) + if err != nil { + return + } + _, err = w.Write(contents) + if err != nil { + return + } + _, err = w.Write(privateKeyBytes) + if err != nil { + return + } + + checksum := mod64kHash(privateKeyBytes) + var checksumBytes [2]byte + checksumBytes[0] = byte(checksum >> 8) + checksumBytes[1] = byte(checksum) + _, err = w.Write(checksumBytes[:]) + + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + err := writeBig(w, priv.D) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[1]) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[0]) + if err != nil { + return err + } + return writeBig(w, priv.Precomputed.Qinv) +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + return writeBig(w, priv.D) +} + +// Decrypt decrypts an encrypted private key using a passphrase. 
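A sketch of unlocking a passphrase-protected key with the Decrypt method documented above; passphrase handling is a placeholder:

package pgpexample

import (
	"log"

	"golang.org/x/crypto/openpgp/packet"
)

func unlock(pk *packet.PrivateKey, passphrase []byte) error {
	if !pk.Encrypted {
		return nil // already usable
	}
	if err := pk.Decrypt(passphrase); err != nil {
		return err // wrong passphrase or corrupt key material
	}
	log.Printf("key %X decrypted", pk.KeyId)
	return nil
}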
+func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + return pk.parsePrivateKey(data) +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + p, _, err := readMPI(buf) + if err != nil { + return + } + q, _, err := readMPI(buf) + if err != nil { + return + } + + rsaPriv.D = new(big.Int).SetBytes(d) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q) + if err := rsaPriv.Validate(); err != nil { + return err + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + dsaPriv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = dsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + priv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = priv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + + pk.PrivateKey = &ecdsa.PrivateKey{ + PublicKey: *ecdsaPub, + D: new(big.Int).SetBytes(d), + } + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go 
b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go new file mode 100644 index 00000000..ead26233 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go @@ -0,0 +1,748 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" +) + +var ( + // NIST curve P-256 + oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} + // NIST curve P-384 + oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} + // NIST curve P-521 + oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} +) + +const maxOIDLength = 8 + +// ecdsaKey stores the algorithm-specific fields for ECDSA keys. +// as defined in RFC 6637, Section 9. +type ecdsaKey struct { + // oid contains the OID byte sequence identifying the elliptic curve used + oid []byte + // p contains the elliptic curve point that represents the public key + p parsedMPI +} + +// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. +func parseOID(r io.Reader) (oid []byte, err error) { + buf := make([]byte, maxOIDLength) + if _, err = readFull(r, buf[:1]); err != nil { + return + } + oidLen := buf[0] + if int(oidLen) > len(buf) { + err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) + return + } + oid = buf[:oidLen] + _, err = readFull(r, oid) + return +} + +func (f *ecdsaKey) parse(r io.Reader) (err error) { + if f.oid, err = parseOID(r); err != nil { + return err + } + f.p.bytes, f.p.bitLength, err = readMPI(r) + return +} + +func (f *ecdsaKey) serialize(w io.Writer) (err error) { + buf := make([]byte, maxOIDLength+1) + buf[0] = byte(len(f.oid)) + copy(buf[1:], f.oid) + if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { + return + } + return writeMPIs(w, f.p) +} + +func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { + var c elliptic.Curve + if bytes.Equal(f.oid, oidCurveP256) { + c = elliptic.P256() + } else if bytes.Equal(f.oid, oidCurveP384) { + c = elliptic.P384() + } else if bytes.Equal(f.oid, oidCurveP521) { + c = elliptic.P521() + } else { + return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) + } + x, y := elliptic.Unmarshal(c, f.p.bytes) + if x == nil { + return nil, errors.UnsupportedError("failed to parse EC point") + } + return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil +} + +func (f *ecdsaKey) byteLen() int { + return 1 + len(f.oid) + 2 + len(f.p.bytes) +} + +type kdfHashFunction byte +type kdfAlgorithm byte + +// ecdhKdf stores key derivation function parameters +// used for ECDH encryption. See RFC 6637, Section 9. 
+type ecdhKdf struct { + KdfHash kdfHashFunction + KdfAlgo kdfAlgorithm +} + +func (f *ecdhKdf) parse(r io.Reader) (err error) { + buf := make([]byte, 1) + if _, err = readFull(r, buf); err != nil { + return + } + kdfLen := int(buf[0]) + if kdfLen < 3 { + return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + buf = make([]byte, kdfLen) + if _, err = readFull(r, buf); err != nil { + return + } + reserved := int(buf[0]) + f.KdfHash = kdfHashFunction(buf[1]) + f.KdfAlgo = kdfAlgorithm(buf[2]) + if reserved != 0x01 { + return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) + } + return +} + +func (f *ecdhKdf) serialize(w io.Writer) (err error) { + buf := make([]byte, 4) + // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. + buf[0] = byte(0x03) // Length of the following fields + buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now + buf[2] = byte(f.KdfHash) + buf[3] = byte(f.KdfAlgo) + _, err = w.Write(buf[:]) + return +} + +func (f *ecdhKdf) byteLen() int { + return 4 +} + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey + Fingerprint [20]byte + KeyId uint64 + IsSubkey bool + + n, e, p, q, g, y parsedMPI + + // RFC 6637 fields + ec *ecdsaKey + ecdh *ecdhKdf +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +func fromBig(n *big.Int) parsedMPI { + return parsedMPI{ + bytes: n.Bytes(), + bitLength: uint16(n.BitLen()), + } +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: fromBig(pub.P), + q: fromBig(pub.Q), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
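// Illustrative sketch (standalone, not from the vendored sources): the MPI
// wire format of RFC 4880, section 3.2, which is the pairing that fromBig
// above captures as parsedMPI{bytes, bitLength}: a two-octet big-endian bit
// count followed by the magnitude bytes. Function and variable names are ours.
package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

// encodeMPI renders n as an OpenPGP MPI.
func encodeMPI(n *big.Int) []byte {
	out := make([]byte, 2+len(n.Bytes()))
	binary.BigEndian.PutUint16(out, uint16(n.BitLen()))
	copy(out[2:], n.Bytes())
	return out
}

func main() {
	// 65537, the usual RSA public exponent: 17 bits -> 00 11 01 00 01.
	fmt.Printf("% X\n", encodeMPI(big.NewInt(65537)))
}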
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: fromBig(pub.P), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + ec: new(ecdsaKey), + } + + switch pub.Curve { + case elliptic.P256(): + pk.ec.oid = oidCurveP256 + case elliptic.P384(): + pk.ec.oid = oidCurveP384 + case elliptic.P521(): + pk.ec.oid = oidCurveP521 + default: + panic("unknown elliptic curve") + } + + pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + pk.ec.p.bitLength = uint16(8 * len(pk.ec.p.bytes)) + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return err + } + pk.PublicKey, err = pk.ec.newECDSA() + case PubKeyAlgoECDH: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return + } + pk.ecdh = new(ecdhKdf) + if err = pk.ecdh.parse(r); err != nil { + return + } + // The ECDH key is stored in an ecdsa.PublicKey for convenience. + pk.PublicKey, err = pk.ec.newECDSA() + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKey) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := sha1.New() + pk.SerializeSignaturePrefix(fingerPrint) + pk.serializeWithoutHeaders(fingerPrint) + copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n.bytes, pk.n.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.e.bytes, pk.e.bitLength, err = readMPI(r) + if err != nil { + return + } + + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.bytes), + E: 0, + } + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. 
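// Illustrative sketch (standalone, not from the vendored sources): how
// setFingerPrintAndKeyId derives the v4 fingerprint and key ID. The SHA-1 hash
// runs over the 0x99-prefixed, length-framed key body, and the 64-bit key ID
// is the trailing 8 bytes of the 20-byte fingerprint. The body bytes below are
// made up; a real key would hash its serializeWithoutHeaders output.
package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
)

func main() {
	body := []byte{0x04, 0x5A, 0x9C, 0x3B, 0x00, 0x01} // stand-in key body
	prefix := []byte{0x99, byte(len(body) >> 8), byte(len(body))}

	h := sha1.New()
	h.Write(prefix)
	h.Write(body)
	fingerprint := h.Sum(nil) // 20 bytes

	keyID := binary.BigEndian.Uint64(fingerprint[12:20])
	fmt.Printf("fingerprint %X\nkey id      %016X\n", fingerprint, keyID)
}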
+func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.q.bytes, pk.q.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.bytes) + dsa.Q = new(big.Int).SetBytes(pk.q.bytes) + dsa.G = new(big.Int).SetBytes(pk.g.bytes) + dsa.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.bytes) + elgamal.G = new(big.Int).SetBytes(pk.g.bytes) + elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = elgamal + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + case PubKeyAlgoDSA: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.q.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoElGamal: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoECDSA: + pLength += uint16(pk.ec.byteLen()) + case PubKeyAlgoECDH: + pLength += uint16(pk.ec.byteLen()) + pLength += uint16(pk.ecdh.byteLen()) + default: + panic("unknown public key algorithm") + } + pLength += 6 + h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + case PubKeyAlgoDSA: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.q.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoElGamal: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoECDSA: + length += pk.ec.byteLen() + case PubKeyAlgoECDH: + length += pk.ec.byteLen() + length += pk.ecdh.byteLen() + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. 
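// Illustrative sketch (standalone, not from the vendored sources): the length
// arithmetic behind Serialize and SerializeSignaturePrefix for an RSA key,
// a fixed 6-byte header plus one (2-byte count || bytes) MPI each for n and e.
// The sample sizes below are typical values chosen for illustration.
package main

import "fmt"

func v4RSAKeyBodyLength(nLen, eLen int) int {
	return 6 + (2 + nLen) + (2 + eLen)
}

func main() {
	// A 2048-bit modulus is 256 bytes; the exponent 65537 fits in 3 bytes.
	fmt.Println(v4RSAKeyBodyLength(256, 3)) // 269
}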
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [6]byte + buf[0] = 4 + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + buf[5] = byte(pk.PubKeyAlgo) + + _, err = w.Write(buf[:]) + if err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + case PubKeyAlgoDSA: + return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) + case PubKeyAlgoElGamal: + return writeMPIs(w, pk.p, pk.g, pk.y) + case PubKeyAlgoECDSA: + return pk.ec.serialize(w) + case PubKeyAlgoECDH: + if err = pk.ec.serialize(w); err != nil { + return + } + return pk.ecdh.serialize(w) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. 
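// Illustrative sketch (standalone, not from the vendored sources): the digest
// truncation that VerifySignature applies before dsa.Verify, per FIPS 186-3
// section 4.6: the hash is cut down to the byte length of the subgroup order
// q. The q value below is a placeholder chosen only to have 160 bits.
package main

import (
	"crypto/sha256"
	"fmt"
	"math/big"
)

func truncateForDSA(digest []byte, q *big.Int) []byte {
	subgroupSize := (q.BitLen() + 7) / 8
	if len(digest) > subgroupSize {
		return digest[:subgroupSize]
	}
	return digest
}

func main() {
	digest := sha256.Sum256([]byte("message"))    // 32 bytes
	q := new(big.Int).Lsh(big.NewInt(1), 159)     // a 160-bit stand-in for q
	fmt.Println(len(truncateForDSA(digest[:], q))) // 20
}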
+func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) + if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + case PubKeyAlgoDSA: + dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + default: + panic("shouldn't happen") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + signed.SerializeSignaturePrefix(h) + signed.serializeWithoutHeaders(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. 
+func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// A parsedMPI is used to store the contents of a big integer, along with the +// bit length that was specified in the original input. This allows the MPI to +// be reserialized exactly. +type parsedMPI struct { + bytes []byte + bitLength uint16 +} + +// writeMPIs is a utility function for serializing several big integers to the +// given Writer. +func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { + for _, mpi := range mpis { + err = writeMPI(w, mpi.bitLength, mpi.bytes) + if err != nil { + return + } + } + return +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + case PubKeyAlgoDSA: + bitLength = pk.p.bitLength + case PubKeyAlgoElGamal: + bitLength = pk.p.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go new file mode 100644 index 00000000..5daf7b6c --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go @@ -0,0 +1,279 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
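// Illustrative sketch (standalone, not from the vendored sources): the user-ID
// framing that userIdSignatureHash above feeds into the hash, a 0xb4 tag, a
// four-octet big-endian length, then the ID bytes. The key-packet portion of
// the hash is omitted here, and SHA-256 is an arbitrary choice for the sketch.
package main

import (
	"crypto/sha256"
	"fmt"
)

func hashUserID(id string) []byte {
	h := sha256.New()
	h.Write([]byte{0xb4,
		byte(len(id) >> 24), byte(len(id) >> 16),
		byte(len(id) >> 8), byte(len(id))})
	h.Write([]byte(id))
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%X\n", hashUserID("Alice <alice@example.com>"))
}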
+ +package packet + +import ( + "crypto" + "crypto/md5" + "crypto/rsa" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" +) + +// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and +// should not be used for signing or encrypting. They are supported here only for +// parsing version 3 key material and validating signatures. +// See RFC 4880, section 5.5.2. +type PublicKeyV3 struct { + CreationTime time.Time + DaysToExpire uint16 + PubKeyAlgo PublicKeyAlgorithm + PublicKey *rsa.PublicKey + Fingerprint [16]byte + KeyId uint64 + IsSubkey bool + + n, e parsedMPI +} + +// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. +// Included here for testing purposes only. RFC 4880, section 5.5.2: +// "an implementation MUST NOT generate a V3 key, but MAY accept it." +func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { + pk := &PublicKeyV3{ + CreationTime: creationTime, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKeyV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [8]byte + if _, err = readFull(r, buf[:]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKeyV3) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := md5.New() + fingerPrint.Write(pk.n.bytes) + fingerPrint.Write(pk.e.bytes) + fingerPrint.Sum(pk.Fingerprint[:0]) + pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { + if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { + return + } + if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { + return + } + + // RFC 4880 Section 12.2 requires the low 8 bytes of the + // modulus to form the key id. + if len(pk.n.bytes) < 8 { + return errors.StructuralError("v3 public key modulus is too short") + } + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. 
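// Illustrative sketch (standalone, not from the vendored sources): the older
// v3 scheme from PublicKeyV3.setFingerPrintAndKeyId, where the fingerprint is
// MD5 over the raw modulus and exponent bytes and the key ID is taken from the
// low 64 bits of the modulus rather than the fingerprint. Byte values are
// invented stand-ins for pk.n.bytes and pk.e.bytes.
package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

func main() {
	n := []byte{0xC1, 0x5D, 0x00, 0x3A, 0x77, 0x12, 0x9B, 0x04, 0xDE, 0xAD}
	e := []byte{0x01, 0x00, 0x01}

	fp := md5.Sum(append(append([]byte{}, n...), e...))
	keyID := binary.BigEndian.Uint64(n[len(n)-8:])

	fmt.Printf("fingerprint %X\nkey id      %016X\n", fp[:], keyID)
}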
+func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + default: + panic("unknown public key algorithm") + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { + length := 8 // 8 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + if err = serializeHeader(w, packetType, length); err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [8]byte + // Version 3 + buf[0] = 3 + // Creation time + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + // Days to expire + buf[5] = byte(pk.DaysToExpire >> 8) + buf[6] = byte(pk.DaysToExpire) + // Public key algorithm + buf[7] = byte(pk.PubKeyAlgo) + + if _, err = w.Write(buf[:]); err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKeyV3) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + default: + // V3 public keys only support RSA. + panic("shouldn't happen") + } +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of signed. 
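// Illustrative sketch (standalone, not from the vendored sources): the
// rsa.VerifyPKCS1v15 call that both VerifySignature and VerifySignatureV3 rely
// on, demonstrated as a sign/verify round trip on a throwaway key. A real
// verification would use the digest accumulated from the packet data and the
// signature MPI bytes instead.
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	digest := sha256.Sum256([]byte("signed data"))
	sig, err := rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, digest[:])
	if err != nil {
		panic(err)
	}

	err = rsa.VerifyPKCS1v15(&priv.PublicKey, crypto.SHA256, digest[:], sig)
	fmt.Println("verified:", err == nil)
}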
+func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// userIdSignatureV3Hash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { + if !hfn.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hfn.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + h.Write([]byte(id)) + + return +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKeyV3) KeyIdString() string { + return fmt.Sprintf("%X", pk.KeyId) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKeyV3) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go new file mode 100644 index 00000000..34bc7c61 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/reader.go @@ -0,0 +1,76 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. 
+func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go new file mode 100644 index 00000000..6ce0cbed --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature.go @@ -0,0 +1,731 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "encoding/asn1" + "encoding/binary" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + CreationTime time.Time + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI + ECDSASigR, ECDSASigS parsedMPI + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + IssuerKeyId *uint64 + IsPrimaryId *bool + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 4880, section 5.2.3.23 for details. + RevocationReason *uint8 + RevocationReasonText string + + // MDC is set if this signature has a feature packet that indicates + // support for MDC subpackets. + MDC bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. 
+ EmbeddedSignature *Signature + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + l := 6 + hashedSubpacketsLength + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + copy(sig.HashSuffix[1:], buf[:5]) + hashedSubpackets := sig.HashSuffix[6:l] + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + // See RFC 4880, section 5.2.4 + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = uint8(l >> 24) + trailer[3] = uint8(l >> 16) + trailer[4] = uint8(l >> 8) + trailer[5] = uint8(l) + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + } + case PubKeyAlgoECDSA: + sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. 
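// Illustrative sketch (standalone, not from the vendored sources): the
// six-byte trailer that Signature.parse appends to HashSuffix, version 4,
// 0xff, then the length of the hashed material as a four-octet big-endian
// count (RFC 4880, section 5.2.4). The example length is arbitrary.
package main

import "fmt"

func v4Trailer(hashedLen int) []byte {
	return []byte{
		4, 0xff,
		byte(hashedLen >> 24), byte(hashedLen >> 16),
		byte(hashedLen >> 8), byte(hashedLen),
	}
}

func main() {
	// e.g. 6 fixed bytes plus 40 bytes of hashed subpackets.
	fmt.Printf("% X\n", v4Trailer(46)) // 04 FF 00 00 00 2E
}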
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if 
len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(uint8) + *sig.RevocationReason = subpacket[0] + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. In practice, the subpacket is used exclusively to + // indicate support for MDC-protected encryption. + sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. + if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. 
+ if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired. +func (sig *Signature) KeyExpired(currentTime time.Time) bool { + if sig.KeyLifetimeSecs == nil { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix() (err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + + var ok bool + l := 6 + hashedSubpacketsLen + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + sig.HashSuffix[1] = uint8(sig.SigType) + sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) + sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) + sig.HashSuffix[5] = byte(hashedSubpacketsLen) + serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = byte(l >> 24) + trailer[3] = byte(l >> 16) + trailer[4] = byte(l >> 8) + trailer[5] = byte(l) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + err = sig.buildHashSuffix() + if err != nil { + return + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. 
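// Illustrative sketch (standalone, not from the vendored sources): decoding
// the variable-length subpacket count of RFC 4880, section 4.2.2, the inverse
// of serializeSubpacketLength above. One octet covers 0..191, two octets cover
// 192..16319, and a 0xff marker introduces a full four-octet length.
package main

import (
	"encoding/binary"
	"fmt"
)

// readSubpacketLength returns the decoded length and the number of octets the
// encoding consumed; it assumes b is long enough for the form it starts with.
func readSubpacketLength(b []byte) (length uint32, n int) {
	switch {
	case b[0] < 192:
		return uint32(b[0]), 1
	case b[0] < 255:
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, 2
	default:
		return binary.BigEndian.Uint32(b[1:5]), 5
	}
}

func main() {
	fmt.Println(readSubpacketLength([]byte{100}))              // 100 1
	fmt.Println(readSubpacketLength([]byte{192, 0}))           // 192 2
	fmt.Println(readSubpacketLength([]byte{255, 0, 0, 64, 0})) // 16384 5
}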
+func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + sig.outSubpackets = sig.buildSubpackets() + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + // supports both *rsa.PrivateKey and crypto.Signer + sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err == nil { + sig.DSASigR.bytes = r.Bytes() + sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) + sig.DSASigS.bytes = s.Bytes() + sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) + } + case PubKeyAlgoECDSA: + var r, s *big.Int + if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { + // direct support, avoid asn1 wrapping/unwrapping + r, s, err = ecdsa.Sign(config.Random(), pk, digest) + } else { + var b []byte + b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil) + if err == nil { + r, s, err = unwrapECDSASig(b) + } + } + if err == nil { + sig.ECDSASigR = fromBig(r) + sig.ECDSASigS = fromBig(s) + } + default: + err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) + } + + return +} + +// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA +// signature. +func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { + var ecsdaSig struct { + R, S *big.Int + } + _, err = asn1.Unmarshal(b, &ecsdaSig) + if err != nil { + return + } + return ecsdaSig.R, ecsdaSig.S, nil +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. 
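// Illustrative sketch (standalone, not from the vendored sources): the ASN.1
// wrapping that unwrapECDSASig undoes when a generic crypto.Signer returns a
// DER-encoded ECDSA signature, shown here as a marshal/unmarshal round trip on
// a throwaway P-256 key.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"encoding/asn1"
	"fmt"
	"math/big"
)

// ecdsaSig matches the SEQUENCE { r INTEGER, s INTEGER } layout that
// unwrapECDSASig decodes.
type ecdsaSig struct {
	R, S *big.Int
}

func main() {
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	digest := sha256.Sum256([]byte("signed data"))

	r, s, err := ecdsa.Sign(rand.Reader, priv, digest[:])
	if err != nil {
		panic(err)
	}

	// Wrap as ASN.1, as a generic crypto.Signer would return it...
	der, err := asn1.Marshal(ecdsaSig{r, s})
	if err != nil {
		panic(err)
	}

	// ...and unwrap again, which is all unwrapECDSASig does.
	var out ecdsaSig
	if _, err := asn1.Unmarshal(der, &out); err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", out.R.Cmp(r) == 0 && out.S.Cmp(s) == 0)
}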
+func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = 2 + len(sig.RSASignature.bytes) + case PubKeyAlgoDSA: + sigLength = 2 + len(sig.DSASigR.bytes) + sigLength += 2 + len(sig.DSASigS.bytes) + case PubKeyAlgoECDSA: + sigLength = 2 + len(sig.ECDSASigR.bytes) + sigLength += 2 + len(sig.ECDSASigS.bytes) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + + _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) + if err != nil { + return + } + + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + case PubKeyAlgoECDSA: + err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) + } + + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. 
+ + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + // The following subpackets may only appear in self-signatures + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go new file mode 100644 index 00000000..6edff889 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go @@ -0,0 +1,146 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// SignatureV3 represents older version 3 signatures. These signatures are less secure +// than version 4 and should not be used to create new signatures. They are included +// here for backwards compatibility to read and validate with older key material. +// See RFC 4880, section 5.2.2. +type SignatureV3 struct { + SigType SignatureType + CreationTime time.Time + IssuerKeyId uint64 + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + HashTag [2]byte + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI +} + +func (sig *SignatureV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.2 + var buf [8]byte + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] != 5 { + err = errors.UnsupportedError( + "invalid hashed material length " + strconv.Itoa(int(buf[0]))) + return + } + + // Read hashed material: signature type + creation time + if _, err = readFull(r, buf[:5]); err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + t := binary.BigEndian.Uint32(buf[1:5]) + sig.CreationTime = time.Unix(int64(t), 0) + + // Eight-octet Key ID of signer. 
+ if _, err = readFull(r, buf[:8]); err != nil { + return + } + sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) + + // Public-key and hash algorithm + if _, err = readFull(r, buf[:2]); err != nil { + return + } + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + var ok bool + if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + // Two-octet field holding left 16 bits of signed hash value. + if _, err = readFull(r, sig.HashTag[:2]); err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { + return + } + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + default: + panic("unreachable") + } + return +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. +func (sig *SignatureV3) Serialize(w io.Writer) (err error) { + buf := make([]byte, 8) + + // Write the sig type and creation time + buf[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + // Write the issuer long key ID + binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) + if _, err = w.Write(buf[:8]); err != nil { + return + } + + // Write public key algorithm, hash ID, and hash value + buf[0] = byte(sig.PubKeyAlgo) + hashId, ok := s2k.HashToHashId(sig.Hash) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) + } + buf[1] = hashId + copy(buf[2:4], sig.HashTag[:]) + if _, err = w.Write(buf[:4]); err != nil { + return + } + + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + default: + panic("impossible") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go new file mode 100644 index 00000000..744c2d2c --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/cipher" + "io" + "strconv" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// This is the largest session key that we'll support. Since no 512-bit cipher +// has even been seriously used, this is comfortably large. +const maxSessionKeySizeInBytes = 64 + +// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC +// 4880, section 5.3. 
+type SymmetricKeyEncrypted struct { + CipherFunc CipherFunction + s2k func(out, in []byte) + encryptedKey []byte +} + +const symmetricKeyEncryptedVersion = 4 + +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { + // RFC 4880, section 5.3. + var buf [2]byte + if _, err := readFull(r, buf[:]); err != nil { + return err + } + if buf[0] != symmetricKeyEncryptedVersion { + return errors.UnsupportedError("SymmetricKeyEncrypted version") + } + ske.CipherFunc = CipherFunction(buf[1]) + + if ske.CipherFunc.KeySize() == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) + } + + var err error + ske.s2k, err = s2k.Parse(r) + if err != nil { + return err + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. If it exists then we limit it to maxSessionKeySizeInBytes. + n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if n != 0 { + if n == maxSessionKeySizeInBytes { + return errors.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + + return nil +} + +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + + "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") + } + return plaintextKey, cipherFunc, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The +// packet contains a random session key, encrypted by a key derived from the +// given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
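+	// The packet body written below is therefore: one version octet (4), one
+	// cipher octet, the S2K specifier from s2kBuf, and finally one octet
+	// naming the session cipher plus the random session key, both encrypted
+	// with keyEncryptingKey.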
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + var buf [2]byte + buf[0] = symmetricKeyEncryptedVersion + buf[1] = byte(cipherFunc) + _, err = w.Write(buf[:]) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + + key = sessionKey + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 00000000..6126030e --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,290 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "golang.org/x/crypto/openpgp/errors" + "hash" + "io" + "strconv" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted contents of the +// packet can be read. An incorrect key can, with high probability, be detected +// immediately and this will result in a KeyIncorrect error being returned. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. 
+ ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + if s == nil { + return nil, errors.ErrKeyIncorrect + } + + plaintext := cipher.StreamReader{S: s, R: se.contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (MDC) packet. +const mdcPacketTagByte = byte(0x80) | 0x40 | 19 + +func (ser *seMDCReader) Close() error { + if ser.error { + return errors.SignatureError("error during reading") + } + + for !ser.eof { + // We haven't seen EOF so we need to read to the end + var buf [1024]byte + _, err := ser.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return errors.SignatureError("error during reading") + } + } + + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.SignatureError("MDC packet not found") + } + ser.h.Write(ser.trailer[:2]) + + final := ser.h.Sum(nil) + if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { + return errors.SignatureError("hash mismatch") + } + return nil +} + +// An seMDCWriter writes through to an io.WriteCloser while maintains a running +// hash of the data written. On close, it emits an MDC packet containing the +// running hash. 
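+// The emitted trailer is mdcTrailerSize = 1 + 1 + sha1.Size = 22 bytes: the
+// new-format tag byte 0xd3 (0x80|0x40|19), the length byte 0x14 (sha1.Size),
+// and the 20-byte SHA-1 digest of everything hashed so far.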
+type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") + } + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) + if err != nil { + return + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go new file mode 100644 index 00000000..96a2b382 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "image" + "image/jpeg" + "io" + "io/ioutil" +) + +const UserAttrImageSubpacket = 1 + +// UserAttribute is capable of storing other types of data about a user +// beyond name, email and a text comment. In practice, user attributes are typically used +// to store a signed thumbnail photo JPEG image of the user. +// See RFC 4880, section 5.12. +type UserAttribute struct { + Contents []*OpaqueSubpacket +} + +// NewUserAttributePhoto creates a user attribute packet +// containing the given images. +func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { + uat = new(UserAttribute) + for _, photo := range photos { + var buf bytes.Buffer + // RFC 4880, Section 5.12.1. + data := []byte{ + 0x10, 0x00, // Little-endian image header length (16 bytes) + 0x01, // Image header version 1 + 0x01, // JPEG + 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
+ 0, 0, 0, 0, + 0, 0, 0, 0} + if _, err = buf.Write(data); err != nil { + return + } + if err = jpeg.Encode(&buf, photo, nil); err != nil { + return + } + uat.Contents = append(uat.Contents, &OpaqueSubpacket{ + SubType: UserAttrImageSubpacket, + Contents: buf.Bytes()}) + } + return +} + +// NewUserAttribute creates a new user attribute packet containing the given subpackets. +func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { + return &UserAttribute{Contents: contents} +} + +func (uat *UserAttribute) parse(r io.Reader) (err error) { + // RFC 4880, section 5.13 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uat.Contents, err = OpaqueSubpackets(b) + return +} + +// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including +// header. +func (uat *UserAttribute) Serialize(w io.Writer) (err error) { + var buf bytes.Buffer + for _, sp := range uat.Contents { + sp.Serialize(&buf) + } + if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { + return err + } + _, err = w.Write(buf.Bytes()) + return +} + +// ImageData returns zero or more byte slices, each containing +// JPEG File Interchange Format (JFIF), for each photo in the +// the user attribute packet. +func (uat *UserAttribute) ImageData() (imageData [][]byte) { + for _, sp := range uat.Contents { + if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { + imageData = append(imageData, sp.Contents[16:]) + } + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/golang.org/x/crypto/openpgp/packet/userid.go new file mode 100644 index 00000000..d6bea7d4 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/userid.go @@ -0,0 +1,160 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + "io/ioutil" + "strings" +) + +// UserId contains text that is intended to represent the name and email +// address of the key holder. See RFC 4880, section 5.11. By convention, this +// takes the form "Full Name (Comment) " +type UserId struct { + Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. + + Name, Comment, Email string +} + +func hasInvalidCharacters(s string) bool { + for _, c := range s { + switch c { + case '(', ')', '<', '>', 0: + return true + } + } + return false +} + +// NewUserId returns a UserId or nil if any of the arguments contain invalid +// characters. The invalid characters are '\x00', '(', ')', '<' and '>' +func NewUserId(name, comment, email string) *UserId { + // RFC 4880 doesn't deal with the structure of userid strings; the + // name, comment and email form is just a convention. However, there's + // no convention about escaping the metacharacters and GPG just refuses + // to create user ids where, say, the name contains a '('. We mirror + // this behaviour. 
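+	// For example, NewUserId("Alice", "work", "alice@example.com") yields the
+	// id "Alice (work) <alice@example.com>", while any argument containing one
+	// of the metacharacters makes it return nil.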
+ + if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { + return nil + } + + uid := new(UserId) + uid.Name, uid.Comment, uid.Email = name, comment, email + uid.Id = name + if len(comment) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "(" + uid.Id += comment + uid.Id += ")" + } + if len(email) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "<" + uid.Id += email + uid.Id += ">" + } + return uid +} + +func (uid *UserId) parse(r io.Reader) (err error) { + // RFC 4880, section 5.11 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uid.Id = string(b) + uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) + return +} + +// Serialize marshals uid to w in the form of an OpenPGP packet, including +// header. +func (uid *UserId) Serialize(w io.Writer) error { + err := serializeHeader(w, packetTypeUserId, len(uid.Id)) + if err != nil { + return err + } + _, err = w.Write([]byte(uid.Id)) + return err +} + +// parseUserId extracts the name, comment and email from a user id string that +// is formatted as "Full Name (Comment) ". +func parseUserId(id string) (name, comment, email string) { + var n, c, e struct { + start, end int + } + var state int + + for offset, rune := range id { + switch state { + case 0: + // Entering name + n.start = offset + state = 1 + fallthrough + case 1: + // In name + if rune == '(' { + state = 2 + n.end = offset + } else if rune == '<' { + state = 5 + n.end = offset + } + case 2: + // Entering comment + c.start = offset + state = 3 + fallthrough + case 3: + // In comment + if rune == ')' { + state = 4 + c.end = offset + } + case 4: + // Between comment and email + if rune == '<' { + state = 5 + } + case 5: + // Entering email + e.start = offset + state = 6 + fallthrough + case 6: + // In email + if rune == '>' { + state = 7 + e.end = offset + } + default: + // After email + } + } + switch state { + case 1: + // ended in the name + n.end = len(id) + case 3: + // ended in comment + c.end = len(id) + case 6: + // ended in email + e.end = len(id) + } + + name = strings.TrimSpace(id[n.start:n.end]) + comment = strings.TrimSpace(id[c.start:c.end]) + email = strings.TrimSpace(id[e.start:e.end]) + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go new file mode 100644 index 00000000..6ec664f4 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/read.go @@ -0,0 +1,442 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package openpgp implements high level operations on OpenPGP messages. +package openpgp // import "golang.org/x/crypto/openpgp" + +import ( + "crypto" + _ "crypto/sha256" + "hash" + "io" + "strconv" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. 
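+//
+// A minimal consumption sketch (illustrative only; keyring and msg are
+// assumed to be provided by the caller):
+//
+//	md, err := ReadMessage(msg, keyring, nil, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	body, err := ioutil.ReadAll(md.UnverifiedBody) // must be read to EOF
+//	if err != nil || md.SignatureError != nil {
+//		// reject body: the signature or integrity check failed
+//	}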
+type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + SignatureError error // nil if the signature is good. + Signature *packet.Signature // the signature packet itself, if v4 (default) + SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. +func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + var se *packet.SymmetricallyEncrypted + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. 
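+			// A KeyId of zero is a "wildcard" (hidden recipient); in that
+			// case every available decryption key is tried below.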
+ md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted: + se = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. +FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if len(pk.encryptedKey.Key) == 0 { + pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + } + if len(pk.encryptedKey.Key) == 0 { + continue + } + decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + if err == nil { + decrypted, err = se.Decrypt(cipherFunc, key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + return readSignedMessage(packets, md, keyring) +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. 
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if !p.IsLast { + return nil, errors.UnsupportedError("nested signatures") + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md = nil + return + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.SignedBy != nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if !hashId.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) + } + h := hashId.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (n int, err error) { + n, err = cr.md.LiteralData.Body.Read(buf) + if err == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + return +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. 
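+//
+// For detached signatures the same verification is exposed through
+// CheckDetachedSignature below; a minimal caller sketch (keyring and the two
+// readers are assumed):
+//
+//	signer, err := CheckDetachedSignature(keyring, signed, signature)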
+type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails +} + +func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { + n, err = scr.md.LiteralData.Body.Read(buf) + scr.wrappedHash.Write(buf[:n]) + if err == io.EOF { + var p packet.Packet + p, scr.md.SignatureError = scr.packets.Next() + if scr.md.SignatureError != nil { + return + } + + var ok bool + if scr.md.Signature, ok = p.(*packet.Signature); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) + } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) + } else { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") + return + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + } + return +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the signer if the signature is valid. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + packets := packet.NewReader(signature) + for { + p, err = packets.Next() + if err == io.EOF { + return nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, err + } + + switch sig := p.(type) { + case *packet.Signature: + if sig.IssuerKeyId == nil { + return nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + case *packet.SignatureV3: + issuerKeyId = sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + default: + return nil, errors.StructuralError("non signature packet found") + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, err + } + + for _, key := range keys { + switch sig := p.(type) { + case *packet.Signature: + err = key.PublicKey.VerifySignature(h, sig) + case *packet.SignatureV3: + err = key.PublicKey.VerifySignatureV3(h, sig) + default: + panic("unreachable") + } + + if err == nil { + return key.Entity, nil + } + } + + return nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. +func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + + return CheckDetachedSignature(keyring, signed, body) +} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go new file mode 100644 index 00000000..4b9a44ca --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -0,0 +1,273 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2k implements the various OpenPGP string-to-key transforms as
+// specified in RFC 4880 section 3.7.1.
+package s2k // import "golang.org/x/crypto/openpgp/s2k"
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+
+	"golang.org/x/crypto/openpgp/errors"
+)
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values. Currently, Config is used only by the Serialize function in
+// this package.
+type Config struct {
+	// Hash is the default hash function to be used. If
+	// nil, SHA1 is used.
+	Hash crypto.Hash
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 1024 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 65536 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+}
+
+func (c *Config) hash() crypto.Hash {
+	if c == nil || uint(c.Hash) == 0 {
+		// SHA1 is the historical default in this package.
+		return crypto.SHA1
+	}
+
+	return c.Hash
+}
+
+func (c *Config) encodedCount() uint8 {
+	if c == nil || c.S2KCount == 0 {
+		return 96 // The common case. Corresponding to 65536
+	}
+
+	i := c.S2KCount
+	switch {
+	// Behave like GPG. Should we make 65536 the lowest value used?
+	case i < 1024:
+		i = 1024
+	case i > 65011712:
+		i = 65011712
+	}
+
+	return encodeCount(i)
+}
+
+// encodeCount converts an iterative "count" in the range 1024 to
+// 65011712, inclusive, to an encoded count. The return value is the
+// octet that is actually stored in the GPG file. encodeCount panics
+// if i is not in the above range (encodedCount above takes care to
+// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
+func encodeCount(i int) uint8 {
+	if i < 1024 || i > 65011712 {
+		panic("count arg i outside the required range")
+	}
+
+	for encoded := 0; encoded < 256; encoded++ {
+		count := decodeCount(uint8(encoded))
+		if count >= i {
+			return uint8(encoded)
+		}
+	}
+
+	return 255
+}
+
+// decodeCount returns the s2k mode 3 iterative "count" corresponding to
+// the encoded octet c.
+func decodeCount(c uint8) int {
+	return (16 + int(c&15)) << (uint32(c>>4) + 6)
+}
+
+// Simple writes to out the result of computing the Simple S2K function (RFC
+// 4880, section 3.7.1.1) using the given hash and input passphrase.
+func Simple(out []byte, h hash.Hash, in []byte) {
+	Salted(out, h, in, nil)
+}
+
+var zero [1]byte
+
+// Salted writes to out the result of computing the Salted S2K function (RFC
+// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
+func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
+	done := 0
+	var digest []byte
+
+	for i := 0; done < len(out); i++ {
+		h.Reset()
+		for j := 0; j < i; j++ {
+			h.Write(zero[:])
+		}
+		h.Write(salt)
+		h.Write(in)
+		digest = h.Sum(digest[:0])
+		n := copy(out[done:], digest)
+		done += n
+	}
+}
+
+// Iterated writes to out the result of computing the Iterated and Salted S2K
+// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
+// salt and iteration count.
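+// For example, the default encoded count octet 96 (0x60) decodes to
+// (16+(96&15)) << ((96>>4)+6) = 16 << 12 = 65536 octets of hashed data.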
+func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + var buf [9]byte + + _, err = io.ReadFull(r, buf[:2]) + if err != nil { + return + } + + hash, ok := HashIdToHash(buf[1]) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) + } + if !hash.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) + } + h := hash.New() + + switch buf[0] { + case 0: + f := func(out, in []byte) { + Simple(out, h, in) + } + return f, nil + case 1: + _, err = io.ReadFull(r, buf[:8]) + if err != nil { + return + } + f := func(out, in []byte) { + Salted(out, h, in, buf[:8]) + } + return f, nil + case 3: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return + } + count := decodeCount(buf[8]) + f := func(out, in []byte) { + Iterated(out, h, in, buf[:8], count) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. +func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { + var buf [11]byte + buf[0] = 3 /* iterated and salted */ + buf[1], _ = HashToHashId(c.hash()) + salt := buf[2:10] + if _, err := io.ReadFull(rand, salt); err != nil { + return err + } + encodedCount := c.encodedCount() + count := decodeCount(encodedCount) + buf[10] = encodedCount + if _, err := w.Write(buf[:]); err != nil { + return err + } + + Iterated(key, c.hash().New(), passphrase, salt, count) + return nil +} + +// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. +var hashToHashIdMapping = []struct { + id byte + hash crypto.Hash + name string +}{ + {1, crypto.MD5, "MD5"}, + {2, crypto.SHA1, "SHA1"}, + {3, crypto.RIPEMD160, "RIPEMD160"}, + {8, crypto.SHA256, "SHA256"}, + {9, crypto.SHA384, "SHA384"}, + {10, crypto.SHA512, "SHA512"}, + {11, crypto.SHA224, "SHA224"}, +} + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + for _, m := range hashToHashIdMapping { + if m.id == id { + return m.hash, true + } + } + return 0, false +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. 
+func HashIdToString(id byte) (name string, ok bool) {
+	for _, m := range hashToHashIdMapping {
+		if m.id == id {
+			return m.name, true
+		}
+	}
+
+	return "", false
+}
+
+// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
+func HashToHashId(h crypto.Hash) (id byte, ok bool) {
+	for _, m := range hashToHashIdMapping {
+		if m.hash == h {
+			return m.id, true
+		}
+	}
+	return 0, false
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go
new file mode 100644
index 00000000..65a304cc
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/write.go
@@ -0,0 +1,378 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+	"time"
+
+	"golang.org/x/crypto/openpgp/armor"
+	"golang.org/x/crypto/openpgp/errors"
+	"golang.org/x/crypto/openpgp/packet"
+	"golang.org/x/crypto/openpgp/s2k"
+)
+
+// DetachSign signs message with the private key from signer (which must
+// already have been decrypted) and writes the signature to w.
+// If config is nil, sensible defaults will be used.
+func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
+	return detachSign(w, signer, message, packet.SigTypeBinary, config)
+}
+
+// ArmoredDetachSign signs message with the private key from signer (which
+// must already have been decrypted) and writes an armored signature to w.
+// If config is nil, sensible defaults will be used.
+func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
+	return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
+}
+
+// DetachSignText signs message (after canonicalising the line endings) with
+// the private key from signer (which must already have been decrypted) and
+// writes the signature to w.
+// If config is nil, sensible defaults will be used.
+func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
+	return detachSign(w, signer, message, packet.SigTypeText, config)
+}
+
+// ArmoredDetachSignText signs message (after canonicalising the line endings)
+// with the private key from signer (which must already have been decrypted)
+// and writes an armored signature to w.
+// If config is nil, sensible defaults will be used.
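+//
+// A minimal signing sketch (signer is an *Entity whose private key has
+// already been decrypted; w and message are assumed):
+//
+//	err := ArmoredDetachSignText(w, signer, message, nil)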
+func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sig.IssuerKeyId = &signer.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + io.Copy(wrappedHash, message) + + err = sig.Sign(h, signer.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. +func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + + literaldata := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literaldata, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. 
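+// For example, intersectPreferences([]uint8{9, 8, 2}, []uint8{2, 9}) returns
+// []uint8{9, 2}.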
+func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.signingKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + // In the event that a recipient doesn't specify any supported ciphers + // or hash functions, these are the ones that we assume that every + // implementation supports. + defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] + defaultHashes := candidateHashes[len(candidateHashes)-1:] + + encryptKeys := make([]Key, len(to)) + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].encryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + } + + sig := to[i].primaryIdentity().SelfSignature + + preferredSymmetric := sig.PreferredSymmetric + if len(preferredSymmetric) == 0 { + preferredSymmetric = defaultCiphers + } + preferredHashes := sig.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + } + + if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { + return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. 
+ if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := s2k.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { + return nil, err + } + } + + encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) + if err != nil { + return + } + + if signer != nil { + ops := &packet.OnePassSignature{ + SigType: packet.SigTypeBinary, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if err := ops.Serialize(encryptedData); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := encryptedData + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. + w = noOpCloser{encryptedData} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil + } + return literalData, nil +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. +type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + h hash.Hash + signer *packet.PrivateKey + config *packet.Config +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.h.Write(data) + return s.literalData.Write(data) +} + +func (s signatureWriter) Close() error { + sig := &packet.Signature{ + SigType: packet.SigTypeBinary, + PubKeyAlgo: s.signer.PubKeyAlgo, + Hash: s.hashType, + CreationTime: s.config.Now(), + IssuerKeyId: &s.signer.KeyId, + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} diff --git a/vendor/golang.org/x/text/internal/gen/LICENSE b/vendor/golang.org/x/text/internal/gen/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/text/internal/gen/bitfield/bitfield.go b/vendor/golang.org/x/text/internal/gen/bitfield/bitfield.go new file mode 100644 index 00000000..a8d0a48d --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/bitfield/bitfield.go @@ -0,0 +1,226 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bitfield converts annotated structs into integer values. +// +// Any field that is marked with a bitfield tag is compacted. The tag value has +// two parts. The part before the comma determines the method name for a +// generated type. If left blank the name of the field is used. +// The part after the comma determines the number of bits to use for the +// representation. +package bitfield + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strconv" + "strings" +) + +// Config determines settings for packing and generation. If a Config is used, +// the same Config should be used for packing and generation. +type Config struct { + // NumBits fixes the maximum allowed bits for the integer representation. + // If NumBits is not 8, 16, 32, or 64, the actual underlying integer size + // will be the next largest available. + NumBits uint + + // If Package is set, code generation will write a package clause. + Package string + + // TypeName is the name for the generated type. By default it is the name + // of the type of the value passed to Gen. + TypeName string +} + +var nullConfig = &Config{} + +// Pack packs annotated bit ranges of struct x in an integer. +// +// Only fields that have a "bitfield" tag are compacted. 
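+//
+// A minimal sketch with a hypothetical annotated struct (empty tag values let
+// the field type determine the bit width):
+//
+//	type flags struct {
+//		Sticky bool `bitfield:""`
+//		Dir    bool `bitfield:""`
+//	}
+//	packed, err := Pack(flags{Sticky: true}, nil)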
+func Pack(x interface{}, c *Config) (packed uint64, err error) { + packed, _, err = pack(x, c) + return +} + +func pack(x interface{}, c *Config) (packed uint64, nBit uint, err error) { + if c == nil { + c = nullConfig + } + nBits := c.NumBits + v := reflect.ValueOf(x) + v = reflect.Indirect(v) + t := v.Type() + pos := 64 - nBits + if nBits == 0 { + pos = 0 + } + for i := 0; i < v.NumField(); i++ { + v := v.Field(i) + field := t.Field(i) + f, err := parseField(field) + + if err != nil { + return 0, 0, err + } + if f.nBits == 0 { + continue + } + value := uint64(0) + switch v.Kind() { + case reflect.Bool: + if v.Bool() { + value = 1 + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value = v.Uint() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x := v.Int() + if x < 0 { + return 0, 0, fmt.Errorf("bitfield: negative value for field %q not allowed", field.Name) + } + value = uint64(x) + } + if value > (1< 64 { + return 0, 0, fmt.Errorf("bitfield: no more bits left for field %q", field.Name) + } + packed |= value << shift + } + if nBits == 0 { + nBits = posToBits(pos) + packed >>= (64 - nBits) + } + return packed, nBits, nil +} + +type field struct { + name string + value uint64 + nBits uint +} + +// parseField parses a tag of the form [][:][,[..]] +func parseField(field reflect.StructField) (f field, err error) { + s, ok := field.Tag.Lookup("bitfield") + if !ok { + return f, nil + } + switch field.Type.Kind() { + case reflect.Bool: + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + default: + return f, fmt.Errorf("bitfield: field %q is not an integer or bool type", field.Name) + } + bits := s + f.name = "" + + if i := strings.IndexByte(s, ','); i >= 0 { + bits = s[:i] + f.name = s[i+1:] + } + if bits != "" { + nBits, err := strconv.ParseUint(bits, 10, 8) + if err != nil { + return f, fmt.Errorf("bitfield: invalid bit size for field %q: %v", field.Name, err) + } + f.nBits = uint(nBits) + } + if f.nBits == 0 { + if field.Type.Kind() == reflect.Bool { + f.nBits = 1 + } else { + f.nBits = uint(field.Type.Bits()) + } + } + if f.name == "" { + f.name = field.Name + } + return f, err +} + +func posToBits(pos uint) (bits uint) { + switch { + case pos <= 8: + bits = 8 + case pos <= 16: + bits = 16 + case pos <= 32: + bits = 32 + case pos <= 64: + bits = 64 + default: + panic("unreachable") + } + return bits +} + +// Gen generates code for unpacking integers created with Pack. 
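+//
+// A minimal sketch, reusing the hypothetical flags struct from the Pack
+// example above (the generated accessors are written to buf):
+//
+//	var buf bytes.Buffer
+//	err := Gen(&buf, flags{}, &Config{TypeName: "myFlags"})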
+func Gen(w io.Writer, x interface{}, c *Config) error { + if c == nil { + c = nullConfig + } + _, nBits, err := pack(x, c) + if err != nil { + return err + } + + t := reflect.TypeOf(x) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if c.TypeName == "" { + c.TypeName = t.Name() + } + firstChar := []rune(c.TypeName)[0] + + buf := &bytes.Buffer{} + + print := func(w io.Writer, format string, args ...interface{}) { + if _, e := fmt.Fprintf(w, format+"\n", args...); e != nil && err == nil { + err = fmt.Errorf("bitfield: write failed: %v", err) + } + } + + pos := uint(0) + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + f, _ := parseField(field) + if f.nBits == 0 { + continue + } + shift := nBits - pos - f.nBits + pos += f.nBits + + retType := field.Type.Name() + print(buf, "\nfunc (%c %s) %s() %s {", firstChar, c.TypeName, f.name, retType) + if field.Type.Kind() == reflect.Bool { + print(buf, "\tconst bit = 1 << %d", shift) + print(buf, "\treturn %c&bit == bit", firstChar) + } else { + print(buf, "\treturn %s((%c >> %d) & %#x)", retType, firstChar, shift, (1< 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { + sep += s[:1] + } + + strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) + + w.printf("\n") +} + +func (w *CodeWriter) writeSizeInfo(size int) { + w.printf("// Size: %d bytes\n", size) +} + +// WriteConst writes a constant of the given name and value. +func (w *CodeWriter) WriteConst(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + + switch v.Type().Kind() { + case reflect.String: + w.printf("const %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + w.printf("\n") + default: + w.printf("const %s = %#v\n", name, x) + } +} + +// WriteVar writes a variable of the given name and value. +func (w *CodeWriter) WriteVar(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + oldSize := w.Size + sz := int(v.Type().Size()) + w.Size += sz + + switch v.Type().Kind() { + case reflect.String: + w.printf("var %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + case reflect.Struct: + w.gob.Encode(x) + fallthrough + case reflect.Slice, reflect.Array: + w.printf("var %s = ", name) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + default: + w.printf("var %s %s = ", name, typeName(x)) + w.gob.Encode(x) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + } + w.printf("\n") +} + +func (w *CodeWriter) writeValue(v reflect.Value) { + x := v.Interface() + switch v.Kind() { + case reflect.String: + w.WriteString(v.String()) + case reflect.Array: + // Don't double count: callers of WriteArray count on the size being + // added, so we need to discount it here. + w.Size -= int(v.Type().Size()) + w.writeSlice(x, true) + case reflect.Slice: + w.writeSlice(x, false) + case reflect.Struct: + w.printf("%s{\n", typeName(v.Interface())) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + w.printf("%s: ", t.Field(i).Name) + w.writeValue(v.Field(i)) + w.printf(",\n") + } + w.printf("}") + default: + w.printf("%#v", x) + } +} + +// WriteString writes a string literal. +func (w *CodeWriter) WriteString(s string) { + io.WriteString(w.Hash, s) // content hash + w.Size += len(s) + + const maxInline = 40 + if len(s) <= maxInline { + w.printf("%q", s) + return + } + + // We will render the string as a multi-line string. + const maxWidth = 80 - 4 - len(`"`) - len(`" +`) + + // When starting on its own line, go fmt indents line 2+ an extra level. 
+ n, max := maxWidth, maxWidth-4 + + // As per https://golang.org/issue/18078, the compiler has trouble + // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, + // for large N. We insert redundant, explicit parentheses to work around + // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + + // ... + s127) + etc + (etc + ... + sN). + explicitParens, extraComment := len(s) > 128*1024, "" + if explicitParens { + w.printf(`(`) + extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" + } + + // Print "" +\n, if a string does not start on its own line. + b := w.buf.Bytes() + if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { + w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) + n, max = maxWidth, maxWidth + } + + w.printf(`"`) + + for sz, p, nLines := 0, 0, 0; p < len(s); { + var r rune + r, sz = utf8.DecodeRuneInString(s[p:]) + out := s[p : p+sz] + chars := 1 + if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { + switch sz { + case 1: + out = fmt.Sprintf("\\x%02x", s[p]) + case 2, 3: + out = fmt.Sprintf("\\u%04x", r) + case 4: + out = fmt.Sprintf("\\U%08x", r) + } + chars = len(out) + } else if r == '\\' { + out = "\\" + string(r) + chars = 2 + } + if n -= chars; n < 0 { + nLines++ + if explicitParens && nLines&63 == 63 { + w.printf("\") + (\"") + } + w.printf("\" +\n\"") + n = max - len(out) + } + w.printf("%s", out) + p += sz + } + w.printf(`"`) + if explicitParens { + w.printf(`)`) + } +} + +// WriteSlice writes a slice value. +func (w *CodeWriter) WriteSlice(x interface{}) { + w.writeSlice(x, false) +} + +// WriteArray writes an array value. +func (w *CodeWriter) WriteArray(x interface{}) { + w.writeSlice(x, true) +} + +func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { + v := reflect.ValueOf(x) + w.gob.Encode(v.Len()) + w.Size += v.Len() * int(v.Type().Elem().Size()) + name := typeName(x) + if isArray { + name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) + } + if isArray { + w.printf("%s{\n", name) + } else { + w.printf("%s{ // %d elements\n", name, v.Len()) + } + + switch kind := v.Type().Elem().Kind(); kind { + case reflect.String: + for _, s := range x.([]string) { + w.WriteString(s) + w.printf(",\n") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // nLine and nBlock are the number of elements per line and block. 
+ nLine, nBlock, format := 8, 64, "%d," + switch kind { + case reflect.Uint8: + format = "%#02x," + case reflect.Uint16: + format = "%#04x," + case reflect.Uint32: + nLine, nBlock, format = 4, 32, "%#08x," + case reflect.Uint, reflect.Uint64: + nLine, nBlock, format = 4, 32, "%#016x," + case reflect.Int8: + nLine = 16 + } + n := nLine + for i := 0; i < v.Len(); i++ { + if i%nBlock == 0 && v.Len() > nBlock { + w.printf("// Entry %X - %X\n", i, i+nBlock-1) + } + x := v.Index(i).Interface() + w.gob.Encode(x) + w.printf(format, x) + if n--; n == 0 { + n = nLine + w.printf("\n") + } + } + w.printf("\n") + case reflect.Struct: + zero := reflect.Zero(v.Type().Elem()).Interface() + for i := 0; i < v.Len(); i++ { + x := v.Index(i).Interface() + w.gob.EncodeValue(v) + if !reflect.DeepEqual(zero, x) { + line := fmt.Sprintf("%#v,\n", x) + line = line[strings.IndexByte(line, '{'):] + w.printf("%d: ", i) + w.printf(line) + } + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + w.printf("%d: %#v,\n", i, v.Index(i).Interface()) + } + default: + panic("gen: slice elem type not supported") + } + w.printf("}") +} + +// WriteType writes a definition of the type of the given value and returns the +// type name. +func (w *CodeWriter) WriteType(x interface{}) string { + t := reflect.TypeOf(x) + w.printf("type %s struct {\n", t.Name()) + for i := 0; i < t.NumField(); i++ { + w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) + } + w.printf("}\n") + return t.Name() +} + +// typeName returns the name of the go type of x. +func typeName(x interface{}) string { + t := reflect.ValueOf(x).Type() + return strings.Replace(fmt.Sprint(t), "main.", "", 1) +} diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go new file mode 100644 index 00000000..4c3f7606 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/gen.go @@ -0,0 +1,333 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gen contains common code for the various code generation tools in the +// text repository. Its usage ensures consistency between tools. +// +// This package defines command line flags that are common to most generation +// tools. The flags allow for specifying specific Unicode and CLDR versions +// in the public Unicode data repository (http://www.unicode.org/Public). +// +// A local Unicode data mirror can be set through the flag -local or the +// environment variable UNICODE_DIR. The former takes precedence. The local +// directory should follow the same structure as the public repository. +// +// IANA data can also optionally be mirrored by putting it in the iana directory +// rooted at the top of the local mirror. Beware, though, that IANA data is not +// versioned. So it is up to the developer to use the right version. 
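+//
+// An illustrative sketch of how a generator typically uses this package (the
+// UCD file name is just an example):
+//
+//	func main() {
+//		gen.Init()
+//		r := gen.OpenUCDFile("UnicodeData.txt")
+//		defer r.Close()
+//		// ... parse r and emit tables with gen.WriteGoFile ...
+//	}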
+package gen // import "golang.org/x/text/internal/gen" + +import ( + "bytes" + "flag" + "fmt" + "go/build" + "go/format" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "sync" + "unicode" + + "golang.org/x/text/unicode/cldr" +) + +var ( + url = flag.String("url", + "http://www.unicode.org/Public", + "URL of Unicode database directory") + iana = flag.String("iana", + "http://www.iana.org", + "URL of the IANA repository") + unicodeVersion = flag.String("unicode", + getEnv("UNICODE_VERSION", unicode.Version), + "unicode version to use") + cldrVersion = flag.String("cldr", + getEnv("CLDR_VERSION", cldr.Version), + "cldr version to use") +) + +func getEnv(name, def string) string { + if v := os.Getenv(name); v != "" { + return v + } + return def +} + +// Init performs common initialization for a gen command. It parses the flags +// and sets up the standard logging parameters. +func Init() { + log.SetPrefix("") + log.SetFlags(log.Lshortfile) + flag.Parse() +} + +const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +` + +// UnicodeVersion reports the requested Unicode version. +func UnicodeVersion() string { + return *unicodeVersion +} + +// CLDRVersion reports the requested CLDR version. +func CLDRVersion() string { + return *cldrVersion +} + +var tags = []struct{ version, buildTags string }{ + {"10.0.0", "go1.10"}, + {"", "!go1.10"}, +} + +// buildTags reports the build tags used for the current Unicode version. +func buildTags() string { + v := UnicodeVersion() + for _, x := range tags { + // We should do a numeric comparison, but including the collate package + // would create an import cycle. We approximate it by assuming that + // longer version strings are later. + if len(x.version) <= len(v) { + return x.buildTags + } + if len(x.version) == len(v) && x.version <= v { + return x.buildTags + } + } + return tags[0].buildTags +} + +// IsLocal reports whether data files are available locally. +func IsLocal() bool { + dir, err := localReadmeFile() + if err != nil { + return false + } + if _, err = os.Stat(dir); err != nil { + return false + } + return true +} + +// OpenUCDFile opens the requested UCD file. The file is specified relative to +// the public Unicode root directory. It will call log.Fatal if there are any +// errors. +func OpenUCDFile(file string) io.ReadCloser { + return openUnicode(path.Join(*unicodeVersion, "ucd", file)) +} + +// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there +// are any errors. +func OpenCLDRCoreZip() io.ReadCloser { + return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") +} + +// OpenUnicodeFile opens the requested file of the requested category from the +// root of the Unicode data archive. The file is specified relative to the +// public Unicode root directory. If version is "", it will use the default +// Unicode version. It will call log.Fatal if there are any errors. +func OpenUnicodeFile(category, version, file string) io.ReadCloser { + if version == "" { + version = UnicodeVersion() + } + return openUnicode(path.Join(category, version, file)) +} + +// OpenIANAFile opens the requested IANA file. The file is specified relative +// to the IANA root, which is typically either http://www.iana.org or the +// iana directory in the local mirror. It will call log.Fatal if there are any +// errors. 
+func OpenIANAFile(path string) io.ReadCloser { + return Open(*iana, "iana", path) +} + +var ( + dirMutex sync.Mutex + localDir string +) + +const permissions = 0755 + +func localReadmeFile() (string, error) { + p, err := build.Import("golang.org/x/text", "", build.FindOnly) + if err != nil { + return "", fmt.Errorf("Could not locate package: %v", err) + } + return filepath.Join(p.Dir, "DATA", "README"), nil +} + +func getLocalDir() string { + dirMutex.Lock() + defer dirMutex.Unlock() + + readme, err := localReadmeFile() + if err != nil { + log.Fatal(err) + } + dir := filepath.Dir(readme) + if _, err := os.Stat(readme); err != nil { + if err := os.MkdirAll(dir, permissions); err != nil { + log.Fatalf("Could not create directory: %v", err) + } + ioutil.WriteFile(readme, []byte(readmeTxt), permissions) + } + return dir +} + +const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT. + +This directory contains downloaded files used to generate the various tables +in the golang.org/x/text subrepo. + +Note that the language subtag repo (iana/assignments/language-subtag-registry) +and all other times in the iana subdirectory are not versioned and will need +to be periodically manually updated. The easiest way to do this is to remove +the entire iana directory. This is mostly of concern when updating the language +package. +` + +// Open opens subdir/path if a local directory is specified and the file exists, +// where subdir is a directory relative to the local root, or fetches it from +// urlRoot/path otherwise. It will call log.Fatal if there are any errors. +func Open(urlRoot, subdir, path string) io.ReadCloser { + file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path)) + return open(file, urlRoot, path) +} + +func openUnicode(path string) io.ReadCloser { + file := filepath.Join(getLocalDir(), filepath.FromSlash(path)) + return open(file, *url, path) +} + +// TODO: automatically periodically update non-versioned files. + +func open(file, urlRoot, path string) io.ReadCloser { + if f, err := os.Open(file); err == nil { + return f + } + r := get(urlRoot, path) + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + log.Fatalf("Could not download file: %v", err) + } + os.MkdirAll(filepath.Dir(file), permissions) + if err := ioutil.WriteFile(file, b, permissions); err != nil { + log.Fatalf("Could not create file: %v", err) + } + return ioutil.NopCloser(bytes.NewReader(b)) +} + +func get(root, path string) io.ReadCloser { + url := root + "/" + path + fmt.Printf("Fetching %s...", url) + defer fmt.Println(" done.") + resp, err := http.Get(url) + if err != nil { + log.Fatalf("HTTP GET: %v", err) + } + if resp.StatusCode != 200 { + log.Fatalf("Bad GET status for %q: %q", url, resp.Status) + } + return resp.Body +} + +// TODO: use Write*Version in all applicable packages. + +// WriteUnicodeVersion writes a constant for the Unicode version from which the +// tables are generated. +func WriteUnicodeVersion(w io.Writer) { + fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) +} + +// WriteCLDRVersion writes a constant for the CLDR version from which the +// tables are generated. 
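+//
+// The generated code has the form (the version shown is only an example):
+//
+//	// CLDRVersion is the CLDR version from which the tables in this package are derived.
+//	const CLDRVersion = "32"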
+func WriteCLDRVersion(w io.Writer) { + fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) +} + +// WriteGoFile prepends a standard file comment and package statement to the +// given bytes, applies gofmt, and writes them to a file with the given name. +// It will call log.Fatal if there are any errors. +func WriteGoFile(filename, pkg string, b []byte) { + w, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer w.Close() + if _, err = WriteGo(w, pkg, "", b); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +func insertVersion(filename, version string) string { + suffix := ".go" + if strings.HasSuffix(filename, "_test.go") { + suffix = "_test.go" + } + return fmt.Sprint(filename[:len(filename)-len(suffix)], version, suffix) +} + +// WriteVersionedGoFile prepends a standard file comment, adds build tags to +// version the file for the current Unicode version, and package statement to +// the given bytes, applies gofmt, and writes them to a file with the given +// name. It will call log.Fatal if there are any errors. +func WriteVersionedGoFile(filename, pkg string, b []byte) { + tags := buildTags() + if tags != "" { + filename = insertVersion(filename, UnicodeVersion()) + } + w, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer w.Close() + if _, err = WriteGo(w, pkg, tags, b); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo prepends a standard file comment and package statement to the given +// bytes, applies gofmt, and writes them to w. +func WriteGo(w io.Writer, pkg, tags string, b []byte) (n int, err error) { + src := []byte(header) + if tags != "" { + src = append(src, fmt.Sprintf("// +build %s\n\n", tags)...) + } + src = append(src, fmt.Sprintf("package %s\n\n", pkg)...) + src = append(src, b...) + formatted, err := format.Source(src) + if err != nil { + // Print the generated code even in case of an error so that the + // returned error can be meaningfully interpreted. + n, _ = w.Write(src) + return n, err + } + return w.Write(formatted) +} + +// Repackage rewrites a Go file from belonging to package main to belonging to +// the given package. +func Repackage(inFile, outFile, pkg string) { + src, err := ioutil.ReadFile(inFile) + if err != nil { + log.Fatalf("reading %s: %v", inFile, err) + } + const toDelete = "package main\n\n" + i := bytes.Index(src, []byte(toDelete)) + if i < 0 { + log.Fatalf("Could not find %q in %s.", toDelete, inFile) + } + w := &bytes.Buffer{} + w.Write(src[i+len(toDelete):]) + WriteGoFile(outFile, pkg, w.Bytes()) +} diff --git a/vendor/golang.org/x/text/internal/triegen/LICENSE b/vendor/golang.org/x/text/internal/triegen/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/text/internal/triegen/compact.go b/vendor/golang.org/x/text/internal/triegen/compact.go new file mode 100644 index 00000000..397b975c --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/compact.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +// This file defines Compacter and its implementations. + +import "io" + +// A Compacter generates an alternative, more space-efficient way to store a +// trie value block. A trie value block holds all possible values for the last +// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block +// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0). +type Compacter interface { + // Size returns whether the Compacter could encode the given block as well + // as its size in case it can. len(v) is always 64. + Size(v []uint64) (sz int, ok bool) + + // Store stores the block using the Compacter's compression method. + // It returns a handle with which the block can be retrieved. + // len(v) is always 64. + Store(v []uint64) uint32 + + // Print writes the data structures associated to the given store to w. + Print(w io.Writer) error + + // Handler returns the name of a function that gets called during trie + // lookup for blocks generated by the Compacter. The function should be of + // the form func (n uint32, b byte) uint64, where n is the index returned by + // the Compacter's Store method and b is the last byte of the UTF-8 + // encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the + // block. + Handler() string +} + +// simpleCompacter is the default Compacter used by builder. It implements a +// normal trie block. +type simpleCompacter builder + +func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { + return blockSize * b.ValueSize, true +} + +func (b *simpleCompacter) Store(v []uint64) uint32 { + h := uint32(len(b.ValueBlocks) - blockOffset) + b.ValueBlocks = append(b.ValueBlocks, v) + return h +} + +func (b *simpleCompacter) Print(io.Writer) error { + // Structures are printed in print.go. 
+ return nil +} + +func (b *simpleCompacter) Handler() string { + panic("Handler should be special-cased for this Compacter") +} diff --git a/vendor/golang.org/x/text/internal/triegen/print.go b/vendor/golang.org/x/text/internal/triegen/print.go new file mode 100644 index 00000000..8d9f120b --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/print.go @@ -0,0 +1,251 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// print writes all the data structures as well as the code necessary to use the +// trie to w. +func (b *builder) print(w io.Writer) error { + b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize + b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize + b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize + b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize + b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize + + // If we only have one root trie, all starter blocks are at position 0 and + // we can access the arrays directly. + if len(b.Trie) == 1 { + // At this point we cannot refer to the generated tables directly. + b.ASCIIBlock = b.Name + "Values" + b.StarterBlock = b.Name + "Index" + } else { + // Otherwise we need to have explicit starter indexes in the trie + // structure. + b.ASCIIBlock = "t.ascii" + b.StarterBlock = "t.utf8Start" + } + + b.SourceType = "[]byte" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + b.SourceType = "string" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + if err := trieGen.Execute(w, b); err != nil { + return err + } + + for _, c := range b.Compactions { + if err := c.c.Print(w); err != nil { + return err + } + } + + return nil +} + +func printValues(n int, values []uint64) string { + w := &bytes.Buffer{} + boff := n * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) + var newline bool + for i, v := range values { + if i%6 == 0 { + newline = true + } + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) + } + } + return w.String() +} + +func printIndex(b *builder, nr int, n *node) string { + w := &bytes.Buffer{} + boff := nr * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff) + var newline bool + for i, c := range n.children { + if i%8 == 0 { + newline = true + } + if c != nil { + v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index) + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v) + } + } + } + return w.String() +} + +var ( + trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{ + "printValues": printValues, + "printIndex": printIndex, + "title": strings.Title, + "dec": func(x int) int { return x - 1 }, + "psize": func(n int) string { + return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024) + }, + }).Parse(trieTemplate)) + lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate)) +) + +// TODO: consider the return type of lookup. It could be uint64, even if the +// internal value type is smaller. We will have to verify this with the +// performance of unicode/norm, which is very sensitive to such changes. +const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}} +// {{.Name}}Trie. Total size: {{psize .Size}}. 
Checksum: {{printf "%08x" .Checksum}}. +type {{.Name}}Trie struct { {{if $multi}} + ascii []{{.ValueType}} // index for ASCII bytes + utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0 +{{end}}} + +func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}} + h := {{.Name}}TrieHandles[i] + return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] } +} + +type {{.Name}}TrieHandle struct { + ascii, multi {{.IndexType}} +} + +// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes +var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{ +{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}} +{{end}}}{{else}} + return &{{.Name}}Trie{} +} +{{end}} +// lookupValue determines the type of block n and looks up the value for b. +func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} { + switch { {{range $i, $c := .Compactions}} + {{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}} + n -= {{$c.Offset}}{{end}} + return {{print $b.ValueType}}({{$c.Handler}}){{end}} + } +} + +// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes +// The third block is the zero block. +var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} { +{{range $i, $v := .ValueBlocks}}{{printValues $i $v}} +{{end}}} + +// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes +// Block 0 is the zero block. +var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} { +{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}} +{{end}}} +` + +// TODO: consider allowing zero-length strings after evaluating performance with +// unicode/norm. +const lookupTemplate = ` +// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return {{.ASCIIBlock}}[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = {{.Name}}Index[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return {{.ASCIIBlock}}[c0] + } + i := {{.StarterBlock}}[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} +` diff --git a/vendor/golang.org/x/text/internal/triegen/triegen.go b/vendor/golang.org/x/text/internal/triegen/triegen.go new file mode 100644 index 00000000..adb01081 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/triegen.go @@ -0,0 +1,494 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package triegen implements a code generator for a trie for associating +// unsigned integer values with UTF-8 encoded runes. +// +// Many of the go.text packages use tries for storing per-rune information. A +// trie is especially useful if many of the runes have the same value. If this +// is the case, many blocks can be expected to be shared allowing for +// information on many runes to be stored in little space. +// +// As most of the lookups are done directly on []byte slices, the tries use the +// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to +// runes and contributes a little bit to better performance. It also naturally +// provides a fast path for ASCII. +// +// Space is also an issue. There are many code points defined in Unicode and as +// a result tables can get quite large. So every byte counts. The triegen +// package automatically chooses the smallest integer values to represent the +// tables. Compacters allow further compression of the trie by allowing for +// alternative representations of individual trie blocks. +// +// triegen allows generating multiple tries as a single structure. This is +// useful when, for example, one wants to generate tries for several languages +// that have a lot of values in common. Some existing libraries for +// internationalization store all per-language data as a dynamically loadable +// chunk. The go.text packages are designed with the assumption that the user +// typically wants to compile in support for all supported languages, in line +// with the approach common to Go to create a single standalone binary. The +// multi-root trie approach can give significant storage savings in this +// scenario. +// +// triegen generates both tables and code. The code is optimized to use the +// automatically chosen data types. The following code is generated for a Trie +// or multiple Tries named "foo": +// - type fooTrie +// The trie type. +// +// - func newFooTrie(x int) *fooTrie +// Trie constructor, where x is the index of the trie passed to Gen. 
+// +// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int) +// The lookup method, where uintX is automatically chosen. +// +// - func lookupString, lookupUnsafe and lookupStringUnsafe +// Variants of the above. +// +// - var fooValues and fooIndex and any tables generated by Compacters. +// The core trie data. +// +// - var fooTrieHandles +// Indexes of starter blocks in case of multiple trie roots. +// +// It is recommended that users test the generated trie by checking the returned +// value for every rune. Such exhaustive tests are possible as the the number of +// runes in Unicode is limited. +package triegen // import "golang.org/x/text/internal/triegen" + +// TODO: Arguably, the internally optimized data types would not have to be +// exposed in the generated API. We could also investigate not generating the +// code, but using it through a package. We would have to investigate the impact +// on performance of making such change, though. For packages like unicode/norm, +// small changes like this could tank performance. + +import ( + "encoding/binary" + "fmt" + "hash/crc64" + "io" + "log" + "unicode/utf8" +) + +// builder builds a set of tries for associating values with runes. The set of +// tries can share common index and value blocks. +type builder struct { + Name string + + // ValueType is the type of the trie values looked up. + ValueType string + + // ValueSize is the byte size of the ValueType. + ValueSize int + + // IndexType is the type of trie index values used for all UTF-8 bytes of + // a rune except the last one. + IndexType string + + // IndexSize is the byte size of the IndexType. + IndexSize int + + // SourceType is used when generating the lookup functions. If the user + // requests StringSupport, all lookup functions will be generated for + // string input as well. + SourceType string + + Trie []*Trie + + IndexBlocks []*node + ValueBlocks [][]uint64 + Compactions []compaction + Checksum uint64 + + ASCIIBlock string + StarterBlock string + + indexBlockIdx map[uint64]int + valueBlockIdx map[uint64]nodeIndex + asciiBlockIdx map[uint64]int + + // Stats are used to fill out the template. + Stats struct { + NValueEntries int + NValueBytes int + NIndexEntries int + NIndexBytes int + NHandleBytes int + } + + err error +} + +// A nodeIndex encodes the index of a node, which is defined by the compaction +// which stores it and an index within the compaction. For internal nodes, the +// compaction is always 0. +type nodeIndex struct { + compaction int + index int +} + +// compaction keeps track of stats used for the compaction. +type compaction struct { + c Compacter + blocks []*node + maxHandle uint32 + totalSize int + + // Used by template-based generator and thus exported. + Cutoff uint32 + Offset uint32 + Handler string +} + +func (b *builder) setError(err error) { + if b.err == nil { + b.err = err + } +} + +// An Option can be passed to Gen. +type Option func(b *builder) error + +// Compact configures the trie generator to use the given Compacter. +func Compact(c Compacter) Option { + return func(b *builder) error { + b.Compactions = append(b.Compactions, compaction{ + c: c, + Handler: c.Handler() + "(n, b)"}) + return nil + } +} + +// Gen writes Go code for a shared trie lookup structure to w for the given +// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will +// return the *nameTrie for tries[x]. A value can be looked up by using one of +// the various lookup methods defined on nameTrie. It returns the table size of +// the generated trie. 
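+//
+// An illustrative sketch (the trie name and the inserted value are arbitrary):
+//
+//	t := triegen.NewTrie("foo")
+//	t.Insert('é', 1)
+//	var buf bytes.Buffer
+//	size, err := triegen.Gen(&buf, "foo", []*triegen.Trie{t})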
+func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) { + // The index contains two dummy blocks, followed by the zero block. The zero + // block is at offset 0x80, so that the offset for the zero block for + // continuation bytes is 0. + b := &builder{ + Name: name, + Trie: tries, + IndexBlocks: []*node{{}, {}, {}}, + Compactions: []compaction{{ + Handler: name + "Values[n<<6+uint32(b)]", + }}, + // The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero + // block. + indexBlockIdx: map[uint64]int{0: 0}, + valueBlockIdx: map[uint64]nodeIndex{0: {}}, + asciiBlockIdx: map[uint64]int{}, + } + b.Compactions[0].c = (*simpleCompacter)(b) + + for _, f := range opts { + if err := f(b); err != nil { + return 0, err + } + } + b.build() + if b.err != nil { + return 0, b.err + } + if err = b.print(w); err != nil { + return 0, err + } + return b.Size(), nil +} + +// A Trie represents a single root node of a trie. A builder may build several +// overlapping tries at once. +type Trie struct { + root *node + + hiddenTrie +} + +// hiddenTrie contains values we want to be visible to the template generator, +// but hidden from the API documentation. +type hiddenTrie struct { + Name string + Checksum uint64 + ASCIIIndex int + StarterIndex int +} + +// NewTrie returns a new trie root. +func NewTrie(name string) *Trie { + return &Trie{ + &node{ + children: make([]*node, blockSize), + values: make([]uint64, utf8.RuneSelf), + }, + hiddenTrie{Name: name}, + } +} + +// Gen is a convenience wrapper around the Gen func passing t as the only trie +// and uses the name passed to NewTrie. It returns the size of the generated +// tables. +func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) { + return Gen(w, t.Name, []*Trie{t}, opts...) +} + +// node is a node of the intermediate trie structure. +type node struct { + // children holds this node's children. It is always of length 64. + // A child node may be nil. + children []*node + + // values contains the values of this node. If it is non-nil, this node is + // either a root or leaf node: + // For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F]. + // For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF]. + values []uint64 + + index nodeIndex +} + +// Insert associates value with the given rune. Insert will panic if a non-zero +// value is passed for an invalid rune. +func (t *Trie) Insert(r rune, value uint64) { + if value == 0 { + return + } + s := string(r) + if []rune(s)[0] != r && value != 0 { + // Note: The UCD tables will always assign what amounts to a zero value + // to a surrogate. Allowing a zero value for an illegal rune allows + // users to iterate over [0..MaxRune] without having to explicitly + // exclude surrogates, which would be tedious. + panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r)) + } + if len(s) == 1 { + // It is a root node value (ASCII). 
+ t.root.values[s[0]] = value + return + } + + n := t.root + for ; len(s) > 1; s = s[1:] { + if n.children == nil { + n.children = make([]*node, blockSize) + } + p := s[0] % blockSize + c := n.children[p] + if c == nil { + c = &node{} + n.children[p] = c + } + if len(s) > 2 && c.values != nil { + log.Fatalf("triegen: insert(%U): found internal node with values", r) + } + n = c + } + if n.values == nil { + n.values = make([]uint64, blockSize) + } + if n.children != nil { + log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r) + } + n.values[s[0]-0x80] = value +} + +// Size returns the number of bytes the generated trie will take to store. It +// needs to be exported as it is used in the templates. +func (b *builder) Size() int { + // Index blocks. + sz := len(b.IndexBlocks) * blockSize * b.IndexSize + + // Skip the first compaction, which represents the normal value blocks, as + // its totalSize does not account for the ASCII blocks, which are managed + // separately. + sz += len(b.ValueBlocks) * blockSize * b.ValueSize + for _, c := range b.Compactions[1:] { + sz += c.totalSize + } + + // TODO: this computation does not account for the fixed overhead of a using + // a compaction, either code or data. As for data, though, the typical + // overhead of data is in the order of bytes (2 bytes for cases). Further, + // the savings of using a compaction should anyway be substantial for it to + // be worth it. + + // For multi-root tries, we also need to account for the handles. + if len(b.Trie) > 1 { + sz += 2 * b.IndexSize * len(b.Trie) + } + return sz +} + +func (b *builder) build() { + // Compute the sizes of the values. + var vmax uint64 + for _, t := range b.Trie { + vmax = maxValue(t.root, vmax) + } + b.ValueType, b.ValueSize = getIntType(vmax) + + // Compute all block allocations. + // TODO: first compute the ASCII blocks for all tries and then the other + // nodes. ASCII blocks are more restricted in placement, as they require two + // blocks to be placed consecutively. Processing them first may improve + // sharing (at least one zero block can be expected to be saved.) + for _, t := range b.Trie { + b.Checksum += b.buildTrie(t) + } + + // Compute the offsets for all the Compacters. + offset := uint32(0) + for i := range b.Compactions { + c := &b.Compactions[i] + c.Offset = offset + offset += c.maxHandle + 1 + c.Cutoff = offset + } + + // Compute the sizes of indexes. + // TODO: different byte positions could have different sizes. So far we have + // not found a case where this is beneficial. + imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff) + for _, ib := range b.IndexBlocks { + if x := uint64(ib.index.index); x > imax { + imax = x + } + } + b.IndexType, b.IndexSize = getIntType(imax) +} + +func maxValue(n *node, max uint64) uint64 { + if n == nil { + return max + } + for _, c := range n.children { + max = maxValue(c, max) + } + for _, v := range n.values { + if max < v { + max = v + } + } + return max +} + +func getIntType(v uint64) (string, int) { + switch { + case v < 1<<8: + return "uint8", 1 + case v < 1<<16: + return "uint16", 2 + case v < 1<<32: + return "uint32", 4 + } + return "uint64", 8 +} + +const ( + blockSize = 64 + + // Subtract two blocks to offset 0x80, the first continuation byte. + blockOffset = 2 + + // Subtract three blocks to offset 0xC0, the first non-ASCII starter. + rootBlockOffset = 3 +) + +var crcTable = crc64.MakeTable(crc64.ISO) + +func (b *builder) buildTrie(t *Trie) uint64 { + n := t.root + + // Get the ASCII offset. 
For the first trie, the ASCII block will be at + // position 0. + hasher := crc64.New(crcTable) + binary.Write(hasher, binary.BigEndian, n.values) + hash := hasher.Sum64() + + v, ok := b.asciiBlockIdx[hash] + if !ok { + v = len(b.ValueBlocks) + b.asciiBlockIdx[hash] = v + + b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:]) + if v == 0 { + // Add the zero block at position 2 so that it will be assigned a + // zero reference in the lookup blocks. + // TODO: always do this? This would allow us to remove a check from + // the trie lookup, but at the expense of extra space. Analyze + // performance for unicode/norm. + b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize)) + } + } + t.ASCIIIndex = v + + // Compute remaining offsets. + t.Checksum = b.computeOffsets(n, true) + // We already subtracted the normal blockOffset from the index. Subtract the + // difference for starter bytes. + t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset) + return t.Checksum +} + +func (b *builder) computeOffsets(n *node, root bool) uint64 { + // For the first trie, the root lookup block will be at position 3, which is + // the offset for UTF-8 non-ASCII starter bytes. + first := len(b.IndexBlocks) == rootBlockOffset + if first { + b.IndexBlocks = append(b.IndexBlocks, n) + } + + // We special-case the cases where all values recursively are 0. This allows + // for the use of a zero block to which all such values can be directed. + hash := uint64(0) + if n.children != nil || n.values != nil { + hasher := crc64.New(crcTable) + for _, c := range n.children { + var v uint64 + if c != nil { + v = b.computeOffsets(c, false) + } + binary.Write(hasher, binary.BigEndian, v) + } + binary.Write(hasher, binary.BigEndian, n.values) + hash = hasher.Sum64() + } + + if first { + b.indexBlockIdx[hash] = rootBlockOffset - blockOffset + } + + // Compacters don't apply to internal nodes. + if n.children != nil { + v, ok := b.indexBlockIdx[hash] + if !ok { + v = len(b.IndexBlocks) - blockOffset + b.IndexBlocks = append(b.IndexBlocks, n) + b.indexBlockIdx[hash] = v + } + n.index = nodeIndex{0, v} + } else { + h, ok := b.valueBlockIdx[hash] + if !ok { + bestI, bestSize := 0, blockSize*b.ValueSize + for i, c := range b.Compactions[1:] { + if sz, ok := c.c.Size(n.values); ok && bestSize > sz { + bestI, bestSize = i+1, sz + } + } + c := &b.Compactions[bestI] + c.totalSize += bestSize + v := c.c.Store(n.values) + if c.maxHandle < v { + c.maxHandle = v + } + h = nodeIndex{bestI, int(v)} + b.valueBlockIdx[hash] = h + } + n.index = h + } + return hash +} diff --git a/vendor/golang.org/x/text/internal/ucd/LICENSE b/vendor/golang.org/x/text/internal/ucd/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/text/internal/ucd/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/text/internal/ucd/ucd.go b/vendor/golang.org/x/text/internal/ucd/ucd.go new file mode 100644 index 00000000..8c45b5f3 --- /dev/null +++ b/vendor/golang.org/x/text/internal/ucd/ucd.go @@ -0,0 +1,371 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ucd provides a parser for Unicode Character Database files, the +// format of which is defined in http://www.unicode.org/reports/tr44/. See +// http://www.unicode.org/Public/UCD/latest/ucd/ for example files. +// +// It currently does not support substitutions of missing fields. +package ucd // import "golang.org/x/text/internal/ucd" + +import ( + "bufio" + "errors" + "fmt" + "io" + "log" + "regexp" + "strconv" + "strings" +) + +// UnicodeData.txt fields. +const ( + CodePoint = iota + Name + GeneralCategory + CanonicalCombiningClass + BidiClass + DecompMapping + DecimalValue + DigitValue + NumericValue + BidiMirrored + Unicode1Name + ISOComment + SimpleUppercaseMapping + SimpleLowercaseMapping + SimpleTitlecaseMapping +) + +// Parse calls f for each entry in the given reader of a UCD file. It will close +// the reader upon return. It will call log.Fatal if any error occurred. +// +// This implements the most common usage pattern of using Parser. +func Parse(r io.ReadCloser, f func(p *Parser)) { + defer r.Close() + + p := New(r) + for p.Next() { + f(p) + } + if err := p.Err(); err != nil { + r.Close() // os.Exit will cause defers not to be called. + log.Fatal(err) + } +} + +// An Option is used to configure a Parser. +type Option func(p *Parser) + +func keepRanges(p *Parser) { + p.keepRanges = true +} + +var ( + // KeepRanges prevents the expansion of ranges. The raw ranges can be + // obtained by calling Range(0) on the parser. + KeepRanges Option = keepRanges +) + +// The Part option register a handler for lines starting with a '@'. The text +// after a '@' is available as the first field. Comments are handled as usual. +func Part(f func(p *Parser)) Option { + return func(p *Parser) { + p.partHandler = f + } +} + +// The CommentHandler option passes comments that are on a line by itself to +// a given handler. +func CommentHandler(f func(s string)) Option { + return func(p *Parser) { + p.commentHandler = f + } +} + +// A Parser parses Unicode Character Database (UCD) files. +type Parser struct { + scanner *bufio.Scanner + + keepRanges bool // Don't expand rune ranges in field 0. 
+ + err error + comment string + field []string + // parsedRange is needed in case Range(0) is called more than once for one + // field. In some cases this requires scanning ahead. + line int + parsedRange bool + rangeStart, rangeEnd rune + + partHandler func(p *Parser) + commentHandler func(s string) +} + +func (p *Parser) setError(err error, msg string) { + if p.err == nil && err != nil { + if msg == "" { + p.err = fmt.Errorf("ucd:line:%d: %v", p.line, err) + } else { + p.err = fmt.Errorf("ucd:line:%d:%s: %v", p.line, msg, err) + } + } +} + +func (p *Parser) getField(i int) string { + if i >= len(p.field) { + return "" + } + return p.field[i] +} + +// Err returns a non-nil error if any error occurred during parsing. +func (p *Parser) Err() error { + return p.err +} + +// New returns a Parser for the given Reader. +func New(r io.Reader, o ...Option) *Parser { + p := &Parser{ + scanner: bufio.NewScanner(r), + } + for _, f := range o { + f(p) + } + return p +} + +// Next parses the next line in the file. It returns true if a line was parsed +// and false if it reached the end of the file. +func (p *Parser) Next() bool { + if !p.keepRanges && p.rangeStart < p.rangeEnd { + p.rangeStart++ + return true + } + p.comment = "" + p.field = p.field[:0] + p.parsedRange = false + + for p.scanner.Scan() && p.err == nil { + p.line++ + s := p.scanner.Text() + if s == "" { + continue + } + if s[0] == '#' { + if p.commentHandler != nil { + p.commentHandler(strings.TrimSpace(s[1:])) + } + continue + } + + // Parse line + if i := strings.IndexByte(s, '#'); i != -1 { + p.comment = strings.TrimSpace(s[i+1:]) + s = s[:i] + } + if s[0] == '@' { + if p.partHandler != nil { + p.field = append(p.field, strings.TrimSpace(s[1:])) + p.partHandler(p) + p.field = p.field[:0] + } + p.comment = "" + continue + } + for { + i := strings.IndexByte(s, ';') + if i == -1 { + p.field = append(p.field, strings.TrimSpace(s)) + break + } + p.field = append(p.field, strings.TrimSpace(s[:i])) + s = s[i+1:] + } + if !p.keepRanges { + p.rangeStart, p.rangeEnd = p.getRange(0) + } + return true + } + p.setError(p.scanner.Err(), "scanner failed") + return false +} + +func parseRune(b string) (rune, error) { + if len(b) > 2 && b[0] == 'U' && b[1] == '+' { + b = b[2:] + } + x, err := strconv.ParseUint(b, 16, 32) + return rune(x), err +} + +func (p *Parser) parseRune(s string) rune { + x, err := parseRune(s) + p.setError(err, "failed to parse rune") + return x +} + +// Rune parses and returns field i as a rune. +func (p *Parser) Rune(i int) rune { + if i > 0 || p.keepRanges { + return p.parseRune(p.getField(i)) + } + return p.rangeStart +} + +// Runes interprets and returns field i as a sequence of runes. +func (p *Parser) Runes(i int) (runes []rune) { + add := func(s string) { + if s = strings.TrimSpace(s); len(s) > 0 { + runes = append(runes, p.parseRune(s)) + } + } + for b := p.getField(i); ; { + i := strings.IndexByte(b, ' ') + if i == -1 { + add(b) + break + } + add(b[:i]) + b = b[i+1:] + } + return +} + +var ( + errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") + + // reRange matches one line of a legacy rune range. + reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") +) + +// Range parses and returns field i as a rune range. A range is inclusive at +// both ends. If the field only has one rune, first and last will be identical. +// It supports the legacy format for ranges used in UnicodeData.txt. 
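+//
+// For example, a field of the illustrative form
+//
+//	3400..4DB5
+//
+// yields first = 0x3400 and last = 0x4DB5.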
+func (p *Parser) Range(i int) (first, last rune) { + if !p.keepRanges { + return p.rangeStart, p.rangeStart + } + return p.getRange(i) +} + +func (p *Parser) getRange(i int) (first, last rune) { + b := p.getField(i) + if k := strings.Index(b, ".."); k != -1 { + return p.parseRune(b[:k]), p.parseRune(b[k+2:]) + } + // The first field may not be a rune, in which case we may ignore any error + // and set the range as 0..0. + x, err := parseRune(b) + if err != nil { + // Disable range parsing henceforth. This ensures that an error will be + // returned if the user subsequently will try to parse this field as + // a Rune. + p.keepRanges = true + } + // Special case for UnicodeData that was retained for backwards compatibility. + if i == 0 && len(p.field) > 1 && strings.HasSuffix(p.field[1], "First>") { + if p.parsedRange { + return p.rangeStart, p.rangeEnd + } + mf := reRange.FindStringSubmatch(p.scanner.Text()) + p.line++ + if mf == nil || !p.scanner.Scan() { + p.setError(errIncorrectLegacyRange, "") + return x, x + } + // Using Bytes would be more efficient here, but Text is a lot easier + // and this is not a frequent case. + ml := reRange.FindStringSubmatch(p.scanner.Text()) + if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] { + p.setError(errIncorrectLegacyRange, "") + return x, x + } + p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Text()[:len(ml[1])]) + p.parsedRange = true + return p.rangeStart, p.rangeEnd + } + return x, x +} + +// bools recognizes all valid UCD boolean values. +var bools = map[string]bool{ + "": false, + "N": false, + "No": false, + "F": false, + "False": false, + "Y": true, + "Yes": true, + "T": true, + "True": true, +} + +// Bool parses and returns field i as a boolean value. +func (p *Parser) Bool(i int) bool { + f := p.getField(i) + for s, v := range bools { + if f == s { + return v + } + } + p.setError(strconv.ErrSyntax, "error parsing bool") + return false +} + +// Int parses and returns field i as an integer value. +func (p *Parser) Int(i int) int { + x, err := strconv.ParseInt(string(p.getField(i)), 10, 64) + p.setError(err, "error parsing int") + return int(x) +} + +// Uint parses and returns field i as an unsigned integer value. +func (p *Parser) Uint(i int) uint { + x, err := strconv.ParseUint(string(p.getField(i)), 10, 64) + p.setError(err, "error parsing uint") + return uint(x) +} + +// Float parses and returns field i as a decimal value. +func (p *Parser) Float(i int) float64 { + x, err := strconv.ParseFloat(string(p.getField(i)), 64) + p.setError(err, "error parsing float") + return x +} + +// String parses and returns field i as a string value. +func (p *Parser) String(i int) string { + return string(p.getField(i)) +} + +// Strings parses and returns field i as a space-separated list of strings. +func (p *Parser) Strings(i int) []string { + ss := strings.Split(string(p.getField(i)), " ") + for i, s := range ss { + ss[i] = strings.TrimSpace(s) + } + return ss +} + +// Comment returns the comments for the current line. +func (p *Parser) Comment() string { + return string(p.comment) +} + +var errUndefinedEnum = errors.New("ucd: undefined enum value") + +// Enum interprets and returns field i as a value that must be one of the values +// in enum. 
+func (p *Parser) Enum(i int, enum ...string) string { + f := p.getField(i) + for _, s := range enum { + if f == s { + return s + } + } + p.setError(errUndefinedEnum, "error parsing enum") + return "" +} diff --git a/vendor/golang.org/x/text/unicode/cldr/LICENSE b/vendor/golang.org/x/text/unicode/cldr/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go new file mode 100644 index 00000000..63cdc16c --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/base.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "encoding/xml" + "regexp" + "strconv" +) + +// Elem is implemented by every XML element. +type Elem interface { + setEnclosing(Elem) + setName(string) + enclosing() Elem + + GetCommon() *Common +} + +type hidden struct { + CharData string `xml:",chardata"` + Alias *struct { + Common + Source string `xml:"source,attr"` + Path string `xml:"path,attr"` + } `xml:"alias"` + Def *struct { + Common + Choice string `xml:"choice,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + } `xml:"default"` +} + +// Common holds several of the most common attributes and sub elements +// of an XML element. +type Common struct { + XMLName xml.Name + name string + enclElem Elem + Type string `xml:"type,attr,omitempty"` + Reference string `xml:"reference,attr,omitempty"` + Alt string `xml:"alt,attr,omitempty"` + ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` + Draft string `xml:"draft,attr,omitempty"` + hidden +} + +// Default returns the default type to select from the enclosed list +// or "" if no default value is specified. 
+func (e *Common) Default() string { + if e.Def == nil { + return "" + } + if e.Def.Choice != "" { + return e.Def.Choice + } else if e.Def.Type != "" { + // Type is still used by the default element in collation. + return e.Def.Type + } + return "" +} + +// Element returns the XML element name. +func (e *Common) Element() string { + return e.name +} + +// GetCommon returns e. It is provided such that Common implements Elem. +func (e *Common) GetCommon() *Common { + return e +} + +// Data returns the character data accumulated for this element. +func (e *Common) Data() string { + e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) + return e.CharData +} + +func (e *Common) setName(s string) { + e.name = s +} + +func (e *Common) enclosing() Elem { + return e.enclElem +} + +func (e *Common) setEnclosing(en Elem) { + e.enclElem = en +} + +// Escape characters that can be escaped without further escaping the string. +var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) + +// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string. +// It assumes the input string is correctly formatted. +func replaceUnicode(s string) string { + if s[1] == '#' { + r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) + return string(r) + } + r, _, _, _ := strconv.UnquoteChar(s, 0) + return string(r) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go new file mode 100644 index 00000000..2197f8ac --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go @@ -0,0 +1,130 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run makexml.go -output xml.go + +// Package cldr provides a parser for LDML and related XML formats. +// This package is intended to be used by the table generation tools +// for the various internationalization-related packages. +// As the XML types are generated from the CLDR DTD, and as the CLDR standard +// is periodically amended, this package may change considerably over time. +// This mostly means that data may appear and disappear between versions. +// That is, old code should keep compiling for newer versions, but data +// may have moved or changed. +// CLDR version 22 is the first version supported by this package. +// Older versions may not work. +package cldr // import "golang.org/x/text/unicode/cldr" + +import ( + "fmt" + "sort" +) + +// CLDR provides access to parsed data of the Unicode Common Locale Data Repository. +type CLDR struct { + parent map[string][]string + locale map[string]*LDML + resolved map[string]*LDML + bcp47 *LDMLBCP47 + supp *SupplementalData +} + +func makeCLDR() *CLDR { + return &CLDR{ + parent: make(map[string][]string), + locale: make(map[string]*LDML), + resolved: make(map[string]*LDML), + bcp47: &LDMLBCP47{}, + supp: &SupplementalData{}, + } +} + +// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned. +func (cldr *CLDR) BCP47() *LDMLBCP47 { + return nil +} + +// Draft indicates the draft level of an element. +type Draft int + +const ( + Approved Draft = iota + Contributed + Provisional + Unconfirmed +) + +var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""} + +// ParseDraft returns the Draft value corresponding to the given string. The +// empty string corresponds to Approved. 
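+//
+// For illustration (these examples are not part of the upstream documentation;
+// the values follow from the drafts table above):
+//
+//	d, err := cldr.ParseDraft("contributed") // d == cldr.Contributed, err == nil
+//	d, err = cldr.ParseDraft("")             // d == cldr.Approved, err == nil
+//	d, err = cldr.ParseDraft("bogus")        // err != nil; d falls back to Approved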
+func ParseDraft(level string) (Draft, error) { + if level == "" { + return Approved, nil + } + for i, s := range drafts { + if level == s { + return Unconfirmed - Draft(i), nil + } + } + return Approved, fmt.Errorf("cldr: unknown draft level %q", level) +} + +func (d Draft) String() string { + return drafts[len(drafts)-1-int(d)] +} + +// SetDraftLevel sets which draft levels to include in the evaluated LDML. +// Any draft element for which the draft level is higher than lev will be excluded. +// If multiple draft levels are available for a single element, the one with the +// lowest draft level will be selected, unless preferDraft is true, in which case +// the highest draft will be chosen. +// It is assumed that the underlying LDML is canonicalized. +func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) { + // TODO: implement + cldr.resolved = make(map[string]*LDML) +} + +// RawLDML returns the LDML XML for id in unresolved form. +// id must be one of the strings returned by Locales. +func (cldr *CLDR) RawLDML(loc string) *LDML { + return cldr.locale[loc] +} + +// LDML returns the fully resolved LDML XML for loc, which must be one of +// the strings returned by Locales. +func (cldr *CLDR) LDML(loc string) (*LDML, error) { + return cldr.resolve(loc) +} + +// Supplemental returns the parsed supplemental data. If no such data was parsed, +// nil is returned. +func (cldr *CLDR) Supplemental() *SupplementalData { + return cldr.supp +} + +// Locales returns the locales for which there exist files. +// Valid sublocales for which there is no file are not included. +// The root locale is always sorted first. +func (cldr *CLDR) Locales() []string { + loc := []string{"root"} + hasRoot := false + for l, _ := range cldr.locale { + if l == "root" { + hasRoot = true + continue + } + loc = append(loc, l) + } + sort.Strings(loc[1:]) + if !hasRoot { + return loc[1:] + } + return loc +} + +// Get fills in the fields of x based on the XPath path. +func Get(e Elem, path string) (res Elem, err error) { + return walkXPath(e, path) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go new file mode 100644 index 00000000..80ee28d7 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/collate.go @@ -0,0 +1,359 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "bufio" + "encoding/xml" + "errors" + "fmt" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// RuleProcessor can be passed to Collator's Process method, which +// parses the rules and calls the respective method for each rule found. +type RuleProcessor interface { + Reset(anchor string, before int) error + Insert(level int, str, context, extend string) error + Index(id string) +} + +const ( + // cldrIndex is a Unicode-reserved sentinel value used to mark the start + // of a grouping within an index. + // We ignore any rule that starts with this rune. + // See http://unicode.org/reports/tr35/#Collation_Elements for details. + cldrIndex = "\uFDD0" + + // specialAnchor is the format in which to represent logical reset positions, + // such as "first tertiary ignorable". + specialAnchor = "<%s/>" +) + +// Process parses the rules for the tailorings of this collation +// and calls the respective methods of p for each rule found. 
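+//
+// For orientation, a summary of the cases handled by processRules below (this
+// note and the sample tailoring are illustrative, not upstream documentation):
+// '&' resets the anchor, either to a literal string or, in '[' ... ']' form,
+// to a logical position; '<' repeated one to four times states a primary
+// through quaternary relation; '=' states an identity relation; and a '*'
+// immediately after the operator introduces an abbreviated sequence in which
+// each element is related to the previous one.
+//
+//	& a < b << c <<< d = e
+//	&[before 2] x << y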
+func (c Collation) Process(p RuleProcessor) (err error) { + if len(c.Cr) > 0 { + if len(c.Cr) > 1 { + return fmt.Errorf("multiple cr elements, want 0 or 1") + } + return processRules(p, c.Cr[0].Data()) + } + if c.Rules.Any != nil { + return c.processXML(p) + } + return errors.New("no tailoring data") +} + +// processRules parses rules in the Collation Rule Syntax defined in +// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings. +func processRules(p RuleProcessor, s string) (err error) { + chk := func(s string, e error) string { + if err == nil { + err = e + } + return s + } + i := 0 // Save the line number for use after the loop. + scanner := bufio.NewScanner(strings.NewReader(s)) + for ; scanner.Scan() && err == nil; i++ { + for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) { + level := 5 + var ch byte + switch ch, s = s[0], s[1:]; ch { + case '&': // followed by <anchor> or '[' <key> ']' + if s = skipSpace(s); consume(&s, '[') { + s = chk(parseSpecialAnchor(p, s)) + } else { + s = chk(parseAnchor(p, 0, s)) + } + case '<': // sort relation '<'{1,4}, optionally followed by '*'. + for level = 1; consume(&s, '<'); level++ { + } + if level > 4 { + err = fmt.Errorf("level %d > 4", level) + } + fallthrough + case '=': // identity relation, optionally followed by *. + if consume(&s, '*') { + s = chk(parseSequence(p, level, s)) + } else { + s = chk(parseOrder(p, level, s)) + } + default: + chk("", fmt.Errorf("illegal operator %q", ch)) + break + } + } + } + if chk("", scanner.Err()); err != nil { + return fmt.Errorf("%d: %v", i, err) + } + return nil +} + +// parseSpecialAnchor parses the anchor syntax which is either of the form +// ['before' <level>] <anchor> +// or +// [