mirror of
https://github.com/42wim/matterbridge.git
synced 2024-11-21 18:22:00 -08:00
Add vendor files for spf13/viper
This commit is contained in:
parent
79c4ad5015
commit
25a72113b1
362
vendor/github.com/armon/consul-api/LICENSE
generated
vendored
Normal file
362
vendor/github.com/armon/consul-api/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,362 @@
|
|||||||
|
Mozilla Public License, version 2.0
|
||||||
|
|
||||||
|
1. Definitions
|
||||||
|
|
||||||
|
1.1. "Contributor"
|
||||||
|
|
||||||
|
means each individual or legal entity that creates, contributes to the
|
||||||
|
creation of, or owns Covered Software.
|
||||||
|
|
||||||
|
1.2. "Contributor Version"
|
||||||
|
|
||||||
|
means the combination of the Contributions of others (if any) used by a
|
||||||
|
Contributor and that particular Contributor's Contribution.
|
||||||
|
|
||||||
|
1.3. "Contribution"
|
||||||
|
|
||||||
|
means Covered Software of a particular Contributor.
|
||||||
|
|
||||||
|
1.4. "Covered Software"
|
||||||
|
|
||||||
|
means Source Code Form to which the initial Contributor has attached the
|
||||||
|
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||||
|
Modifications of such Source Code Form, in each case including portions
|
||||||
|
thereof.
|
||||||
|
|
||||||
|
1.5. "Incompatible With Secondary Licenses"
|
||||||
|
means
|
||||||
|
|
||||||
|
a. that the initial Contributor has attached the notice described in
|
||||||
|
Exhibit B to the Covered Software; or
|
||||||
|
|
||||||
|
b. that the Covered Software was made available under the terms of
|
||||||
|
version 1.1 or earlier of the License, but not also under the terms of
|
||||||
|
a Secondary License.
|
||||||
|
|
||||||
|
1.6. "Executable Form"
|
||||||
|
|
||||||
|
means any form of the work other than Source Code Form.
|
||||||
|
|
||||||
|
1.7. "Larger Work"
|
||||||
|
|
||||||
|
means a work that combines Covered Software with other material, in a
|
||||||
|
separate file or files, that is not Covered Software.
|
||||||
|
|
||||||
|
1.8. "License"
|
||||||
|
|
||||||
|
means this document.
|
||||||
|
|
||||||
|
1.9. "Licensable"
|
||||||
|
|
||||||
|
means having the right to grant, to the maximum extent possible, whether
|
||||||
|
at the time of the initial grant or subsequently, any and all of the
|
||||||
|
rights conveyed by this License.
|
||||||
|
|
||||||
|
1.10. "Modifications"
|
||||||
|
|
||||||
|
means any of the following:
|
||||||
|
|
||||||
|
a. any file in Source Code Form that results from an addition to,
|
||||||
|
deletion from, or modification of the contents of Covered Software; or
|
||||||
|
|
||||||
|
b. any new file in Source Code Form that contains any Covered Software.
|
||||||
|
|
||||||
|
1.11. "Patent Claims" of a Contributor
|
||||||
|
|
||||||
|
means any patent claim(s), including without limitation, method,
|
||||||
|
process, and apparatus claims, in any patent Licensable by such
|
||||||
|
Contributor that would be infringed, but for the grant of the License,
|
||||||
|
by the making, using, selling, offering for sale, having made, import,
|
||||||
|
or transfer of either its Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
1.12. "Secondary License"
|
||||||
|
|
||||||
|
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||||
|
General Public License, Version 2.1, the GNU Affero General Public
|
||||||
|
License, Version 3.0, or any later versions of those licenses.
|
||||||
|
|
||||||
|
1.13. "Source Code Form"
|
||||||
|
|
||||||
|
means the form of the work preferred for making modifications.
|
||||||
|
|
||||||
|
1.14. "You" (or "Your")
|
||||||
|
|
||||||
|
means an individual or a legal entity exercising rights under this
|
||||||
|
License. For legal entities, "You" includes any entity that controls, is
|
||||||
|
controlled by, or is under common control with You. For purposes of this
|
||||||
|
definition, "control" means (a) the power, direct or indirect, to cause
|
||||||
|
the direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||||
|
outstanding shares or beneficial ownership of such entity.
|
||||||
|
|
||||||
|
|
||||||
|
2. License Grants and Conditions
|
||||||
|
|
||||||
|
2.1. Grants
|
||||||
|
|
||||||
|
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||||
|
non-exclusive license:
|
||||||
|
|
||||||
|
a. under intellectual property rights (other than patent or trademark)
|
||||||
|
Licensable by such Contributor to use, reproduce, make available,
|
||||||
|
modify, display, perform, distribute, and otherwise exploit its
|
||||||
|
Contributions, either on an unmodified basis, with Modifications, or
|
||||||
|
as part of a Larger Work; and
|
||||||
|
|
||||||
|
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||||
|
sale, have made, import, and otherwise transfer either its
|
||||||
|
Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
2.2. Effective Date
|
||||||
|
|
||||||
|
The licenses granted in Section 2.1 with respect to any Contribution
|
||||||
|
become effective for each Contribution on the date the Contributor first
|
||||||
|
distributes such Contribution.
|
||||||
|
|
||||||
|
2.3. Limitations on Grant Scope
|
||||||
|
|
||||||
|
The licenses granted in this Section 2 are the only rights granted under
|
||||||
|
this License. No additional rights or licenses will be implied from the
|
||||||
|
distribution or licensing of Covered Software under this License.
|
||||||
|
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||||
|
Contributor:
|
||||||
|
|
||||||
|
a. for any code that a Contributor has removed from Covered Software; or
|
||||||
|
|
||||||
|
b. for infringements caused by: (i) Your and any other third party's
|
||||||
|
modifications of Covered Software, or (ii) the combination of its
|
||||||
|
Contributions with other software (except as part of its Contributor
|
||||||
|
Version); or
|
||||||
|
|
||||||
|
c. under Patent Claims infringed by Covered Software in the absence of
|
||||||
|
its Contributions.
|
||||||
|
|
||||||
|
This License does not grant any rights in the trademarks, service marks,
|
||||||
|
or logos of any Contributor (except as may be necessary to comply with
|
||||||
|
the notice requirements in Section 3.4).
|
||||||
|
|
||||||
|
2.4. Subsequent Licenses
|
||||||
|
|
||||||
|
No Contributor makes additional grants as a result of Your choice to
|
||||||
|
distribute the Covered Software under a subsequent version of this
|
||||||
|
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||||
|
permitted under the terms of Section 3.3).
|
||||||
|
|
||||||
|
2.5. Representation
|
||||||
|
|
||||||
|
Each Contributor represents that the Contributor believes its
|
||||||
|
Contributions are its original creation(s) or it has sufficient rights to
|
||||||
|
grant the rights to its Contributions conveyed by this License.
|
||||||
|
|
||||||
|
2.6. Fair Use
|
||||||
|
|
||||||
|
This License is not intended to limit any rights You have under
|
||||||
|
applicable copyright doctrines of fair use, fair dealing, or other
|
||||||
|
equivalents.
|
||||||
|
|
||||||
|
2.7. Conditions
|
||||||
|
|
||||||
|
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||||
|
Section 2.1.
|
||||||
|
|
||||||
|
|
||||||
|
3. Responsibilities
|
||||||
|
|
||||||
|
3.1. Distribution of Source Form
|
||||||
|
|
||||||
|
All distribution of Covered Software in Source Code Form, including any
|
||||||
|
Modifications that You create or to which You contribute, must be under
|
||||||
|
the terms of this License. You must inform recipients that the Source
|
||||||
|
Code Form of the Covered Software is governed by the terms of this
|
||||||
|
License, and how they can obtain a copy of this License. You may not
|
||||||
|
attempt to alter or restrict the recipients' rights in the Source Code
|
||||||
|
Form.
|
||||||
|
|
||||||
|
3.2. Distribution of Executable Form
|
||||||
|
|
||||||
|
If You distribute Covered Software in Executable Form then:
|
||||||
|
|
||||||
|
a. such Covered Software must also be made available in Source Code Form,
|
||||||
|
as described in Section 3.1, and You must inform recipients of the
|
||||||
|
Executable Form how they can obtain a copy of such Source Code Form by
|
||||||
|
reasonable means in a timely manner, at a charge no more than the cost
|
||||||
|
of distribution to the recipient; and
|
||||||
|
|
||||||
|
b. You may distribute such Executable Form under the terms of this
|
||||||
|
License, or sublicense it under different terms, provided that the
|
||||||
|
license for the Executable Form does not attempt to limit or alter the
|
||||||
|
recipients' rights in the Source Code Form under this License.
|
||||||
|
|
||||||
|
3.3. Distribution of a Larger Work
|
||||||
|
|
||||||
|
You may create and distribute a Larger Work under terms of Your choice,
|
||||||
|
provided that You also comply with the requirements of this License for
|
||||||
|
the Covered Software. If the Larger Work is a combination of Covered
|
||||||
|
Software with a work governed by one or more Secondary Licenses, and the
|
||||||
|
Covered Software is not Incompatible With Secondary Licenses, this
|
||||||
|
License permits You to additionally distribute such Covered Software
|
||||||
|
under the terms of such Secondary License(s), so that the recipient of
|
||||||
|
the Larger Work may, at their option, further distribute the Covered
|
||||||
|
Software under the terms of either this License or such Secondary
|
||||||
|
License(s).
|
||||||
|
|
||||||
|
3.4. Notices
|
||||||
|
|
||||||
|
You may not remove or alter the substance of any license notices
|
||||||
|
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||||
|
limitations of liability) contained within the Source Code Form of the
|
||||||
|
Covered Software, except that You may alter any license notices to the
|
||||||
|
extent required to remedy known factual inaccuracies.
|
||||||
|
|
||||||
|
3.5. Application of Additional Terms
|
||||||
|
|
||||||
|
You may choose to offer, and to charge a fee for, warranty, support,
|
||||||
|
indemnity or liability obligations to one or more recipients of Covered
|
||||||
|
Software. However, You may do so only on Your own behalf, and not on
|
||||||
|
behalf of any Contributor. You must make it absolutely clear that any
|
||||||
|
such warranty, support, indemnity, or liability obligation is offered by
|
||||||
|
You alone, and You hereby agree to indemnify every Contributor for any
|
||||||
|
liability incurred by such Contributor as a result of warranty, support,
|
||||||
|
indemnity or liability terms You offer. You may include additional
|
||||||
|
disclaimers of warranty and limitations of liability specific to any
|
||||||
|
jurisdiction.
|
||||||
|
|
||||||
|
4. Inability to Comply Due to Statute or Regulation
|
||||||
|
|
||||||
|
If it is impossible for You to comply with any of the terms of this License
|
||||||
|
with respect to some or all of the Covered Software due to statute,
|
||||||
|
judicial order, or regulation then You must: (a) comply with the terms of
|
||||||
|
this License to the maximum extent possible; and (b) describe the
|
||||||
|
limitations and the code they affect. Such description must be placed in a
|
||||||
|
text file included with all distributions of the Covered Software under
|
||||||
|
this License. Except to the extent prohibited by statute or regulation,
|
||||||
|
such description must be sufficiently detailed for a recipient of ordinary
|
||||||
|
skill to be able to understand it.
|
||||||
|
|
||||||
|
5. Termination
|
||||||
|
|
||||||
|
5.1. The rights granted under this License will terminate automatically if You
|
||||||
|
fail to comply with any of its terms. However, if You become compliant,
|
||||||
|
then the rights granted under this License from a particular Contributor
|
||||||
|
are reinstated (a) provisionally, unless and until such Contributor
|
||||||
|
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||||
|
basis, if such Contributor fails to notify You of the non-compliance by
|
||||||
|
some reasonable means prior to 60 days after You have come back into
|
||||||
|
compliance. Moreover, Your grants from a particular Contributor are
|
||||||
|
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||||
|
non-compliance by some reasonable means, this is the first time You have
|
||||||
|
received notice of non-compliance with this License from such
|
||||||
|
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||||
|
of the notice.
|
||||||
|
|
||||||
|
5.2. If You initiate litigation against any entity by asserting a patent
|
||||||
|
infringement claim (excluding declaratory judgment actions,
|
||||||
|
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||||
|
directly or indirectly infringes any patent, then the rights granted to
|
||||||
|
You by any and all Contributors for the Covered Software under Section
|
||||||
|
2.1 of this License shall terminate.
|
||||||
|
|
||||||
|
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||||
|
license agreements (excluding distributors and resellers) which have been
|
||||||
|
validly granted by You or Your distributors under this License prior to
|
||||||
|
termination shall survive termination.
|
||||||
|
|
||||||
|
6. Disclaimer of Warranty
|
||||||
|
|
||||||
|
Covered Software is provided under this License on an "as is" basis,
|
||||||
|
without warranty of any kind, either expressed, implied, or statutory,
|
||||||
|
including, without limitation, warranties that the Covered Software is free
|
||||||
|
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||||
|
The entire risk as to the quality and performance of the Covered Software
|
||||||
|
is with You. Should any Covered Software prove defective in any respect,
|
||||||
|
You (not any Contributor) assume the cost of any necessary servicing,
|
||||||
|
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||||
|
part of this License. No use of any Covered Software is authorized under
|
||||||
|
this License except under this disclaimer.
|
||||||
|
|
||||||
|
7. Limitation of Liability
|
||||||
|
|
||||||
|
Under no circumstances and under no legal theory, whether tort (including
|
||||||
|
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||||
|
distributes Covered Software as permitted above, be liable to You for any
|
||||||
|
direct, indirect, special, incidental, or consequential damages of any
|
||||||
|
character including, without limitation, damages for lost profits, loss of
|
||||||
|
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses, even if such party shall have been
|
||||||
|
informed of the possibility of such damages. This limitation of liability
|
||||||
|
shall not apply to liability for death or personal injury resulting from
|
||||||
|
such party's negligence to the extent applicable law prohibits such
|
||||||
|
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||||
|
incidental or consequential damages, so this exclusion and limitation may
|
||||||
|
not apply to You.
|
||||||
|
|
||||||
|
8. Litigation
|
||||||
|
|
||||||
|
Any litigation relating to this License may be brought only in the courts
|
||||||
|
of a jurisdiction where the defendant maintains its principal place of
|
||||||
|
business and such litigation shall be governed by laws of that
|
||||||
|
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||||
|
in this Section shall prevent a party's ability to bring cross-claims or
|
||||||
|
counter-claims.
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject
|
||||||
|
matter hereof. If any provision of this License is held to be
|
||||||
|
unenforceable, such provision shall be reformed only to the extent
|
||||||
|
necessary to make it enforceable. Any law or regulation which provides that
|
||||||
|
the language of a contract shall be construed against the drafter shall not
|
||||||
|
be used to construe this License against a Contributor.
|
||||||
|
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version
|
||||||
|
of the License under which You originally received the Covered Software,
|
||||||
|
or under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a
|
||||||
|
modified version of this License if you rename the license and remove
|
||||||
|
any references to the name of the license steward (except to note that
|
||||||
|
such modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||||
|
Licenses If You choose to distribute Source Code Form that is
|
||||||
|
Incompatible With Secondary Licenses under the terms of this version of
|
||||||
|
the License, the notice described in Exhibit B of this License must be
|
||||||
|
attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
|
||||||
|
This Source Code Form is subject to the
|
||||||
|
terms of the Mozilla Public License, v.
|
||||||
|
2.0. If a copy of the MPL was not
|
||||||
|
distributed with this file, You can
|
||||||
|
obtain one at
|
||||||
|
http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular file,
|
||||||
|
then You may include the notice in a location (such as a LICENSE file in a
|
||||||
|
relevant directory) where a recipient would be likely to look for such a
|
||||||
|
notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||||
|
|
||||||
|
This Source Code Form is "Incompatible
|
||||||
|
With Secondary Licenses", as defined by
|
||||||
|
the Mozilla Public License, v. 2.0.
|
140
vendor/github.com/armon/consul-api/acl.go
generated
vendored
Normal file
140
vendor/github.com/armon/consul-api/acl.go
generated
vendored
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
package consulapi
|
||||||
|
|
||||||
|
const (
	// ACLClientType is the client type token
	ACLClientType = "client"

	// ACLManagementType is the management type token
	ACLManagementType = "management"
)
|
||||||
|
|
||||||
|
// ACLEntry is used to represent an ACL entry as returned by the
// Consul ACL endpoints.
type ACLEntry struct {
	// CreateIndex and ModifyIndex are set by the server; presumably
	// they are raft indexes — TODO confirm against Consul docs.
	CreateIndex uint64
	ModifyIndex uint64
	ID          string
	Name        string
	// Type is one of ACLClientType or ACLManagementType.
	Type  string
	Rules string
}
|
||||||
|
|
||||||
|
// ACL can be used to query the ACL endpoints.
type ACL struct {
	// c is the client used to perform the underlying HTTP requests.
	c *Client
}
|
||||||
|
|
||||||
|
// ACL returns a handle to the ACL endpoints
|
||||||
|
func (c *Client) ACL() *ACL {
|
||||||
|
return &ACL{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create is used to generate a new token with the given parameters
|
||||||
|
func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/acl/create")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = acl
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
var out struct{ ID string }
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
return out.ID, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update is used to update the rules of an existing token
|
||||||
|
func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/acl/update")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = acl
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy is used to destroy a given ACL token ID
|
||||||
|
func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone is used to return a new token cloned from an existing one
|
||||||
|
func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
var out struct{ ID string }
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
return out.ID, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Info is used to query for information about an ACL token
|
||||||
|
func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/acl/info/"+id)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var entries []*ACLEntry
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(entries) > 0 {
|
||||||
|
return entries[0], qm, nil
|
||||||
|
}
|
||||||
|
return nil, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List is used to get all the ACL tokens
|
||||||
|
func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/acl/list")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var entries []*ACLEntry
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return entries, qm, nil
|
||||||
|
}
|
272
vendor/github.com/armon/consul-api/agent.go
generated
vendored
Normal file
272
vendor/github.com/armon/consul-api/agent.go
generated
vendored
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
package consulapi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AgentCheck represents a check known to the agent.
type AgentCheck struct {
	Node    string
	CheckID string
	Name    string
	// Status holds the check state (the TTL helpers below use
	// "pass", "warn", and "fail" — TODO confirm the stored values).
	Status      string
	Notes       string
	Output      string
	ServiceID   string
	ServiceName string
}
|
||||||
|
|
||||||
|
// AgentService represents a service known to the agent.
type AgentService struct {
	ID      string
	Service string
	Tags    []string
	Port    int
}
|
||||||
|
|
||||||
|
// AgentMember represents a cluster member known to the agent.
type AgentMember struct {
	Name string
	Addr string
	Port uint16
	Tags map[string]string
	// Status and the protocol/delegate version fields mirror the
	// gossip layer's member metadata.
	Status      int
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
	DelegateMin uint8
	DelegateMax uint8
	DelegateCur uint8
}
|
||||||
|
|
||||||
|
// AgentServiceRegistration is used to register a new service.
type AgentServiceRegistration struct {
	ID   string   `json:",omitempty"`
	Name string   `json:",omitempty"`
	Tags []string `json:",omitempty"`
	Port int      `json:",omitempty"`
	// Check optionally registers a health check together with
	// the service.
	Check *AgentServiceCheck
}
|
||||||
|
|
||||||
|
// AgentCheckRegistration is used to register a new check.
type AgentCheckRegistration struct {
	ID    string `json:",omitempty"`
	Name  string `json:",omitempty"`
	Notes string `json:",omitempty"`
	// The embedded AgentServiceCheck supplies the check definition
	// (Script/Interval/TTL) inline.
	AgentServiceCheck
}
|
||||||
|
|
||||||
|
// AgentServiceCheck is used to create an associated
// check for a service.
type AgentServiceCheck struct {
	Script   string `json:",omitempty"`
	Interval string `json:",omitempty"`
	// TTL makes this a TTL-style check, driven via UpdateTTL and
	// the Pass/Warn/FailTTL helpers.
	TTL string `json:",omitempty"`
}
|
||||||
|
|
||||||
|
// Agent can be used to query the Agent endpoints.
type Agent struct {
	// c is the client used to perform the underlying HTTP requests.
	c *Client

	// nodeName caches the agent's node name after the first
	// successful NodeName call.
	nodeName string
}
|
||||||
|
|
||||||
|
// Agent returns a handle to the agent endpoints
|
||||||
|
func (c *Client) Agent() *Agent {
|
||||||
|
return &Agent{c: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Self is used to query the agent we are speaking to for
|
||||||
|
// information about itself
|
||||||
|
func (a *Agent) Self() (map[string]map[string]interface{}, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/agent/self")
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out map[string]map[string]interface{}
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeName is used to get the node name of the agent
|
||||||
|
func (a *Agent) NodeName() (string, error) {
|
||||||
|
if a.nodeName != "" {
|
||||||
|
return a.nodeName, nil
|
||||||
|
}
|
||||||
|
info, err := a.Self()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
name := info["Config"]["NodeName"].(string)
|
||||||
|
a.nodeName = name
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checks returns the locally registered checks
|
||||||
|
func (a *Agent) Checks() (map[string]*AgentCheck, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/agent/checks")
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out map[string]*AgentCheck
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Services returns the locally registered services
|
||||||
|
func (a *Agent) Services() (map[string]*AgentService, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/agent/services")
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out map[string]*AgentService
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Members returns the known gossip members. The WAN
|
||||||
|
// flag can be used to query a server for WAN members.
|
||||||
|
func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/agent/members")
|
||||||
|
if wan {
|
||||||
|
r.params.Set("wan", "1")
|
||||||
|
}
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out []*AgentMember
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceRegister is used to register a new service with
|
||||||
|
// the local agent
|
||||||
|
func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/service/register")
|
||||||
|
r.obj = service
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceDeregister is used to deregister a service with
|
||||||
|
// the local agent
|
||||||
|
func (a *Agent) ServiceDeregister(serviceID string) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PassTTL is used to set a TTL check to the passing state
|
||||||
|
func (a *Agent) PassTTL(checkID, note string) error {
|
||||||
|
return a.UpdateTTL(checkID, note, "pass")
|
||||||
|
}
|
||||||
|
|
||||||
|
// WarnTTL is used to set a TTL check to the warning state
|
||||||
|
func (a *Agent) WarnTTL(checkID, note string) error {
|
||||||
|
return a.UpdateTTL(checkID, note, "warn")
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailTTL is used to set a TTL check to the failing state
|
||||||
|
func (a *Agent) FailTTL(checkID, note string) error {
|
||||||
|
return a.UpdateTTL(checkID, note, "fail")
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateTTL is used to update the TTL of a check
|
||||||
|
func (a *Agent) UpdateTTL(checkID, note, status string) error {
|
||||||
|
switch status {
|
||||||
|
case "pass":
|
||||||
|
case "warn":
|
||||||
|
case "fail":
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("Invalid status: %s", status)
|
||||||
|
}
|
||||||
|
endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
|
||||||
|
r := a.c.newRequest("PUT", endpoint)
|
||||||
|
r.params.Set("note", note)
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckRegister is used to register a new check with
|
||||||
|
// the local agent
|
||||||
|
func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/check/register")
|
||||||
|
r.obj = check
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckDeregister is used to deregister a check with
|
||||||
|
// the local agent
|
||||||
|
func (a *Agent) CheckDeregister(checkID string) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Join is used to instruct the agent to attempt a join to
|
||||||
|
// another cluster member
|
||||||
|
func (a *Agent) Join(addr string, wan bool) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
|
||||||
|
if wan {
|
||||||
|
r.params.Set("wan", "1")
|
||||||
|
}
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForceLeave is used to have the agent eject a failed node
|
||||||
|
func (a *Agent) ForceLeave(node string) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
323
vendor/github.com/armon/consul-api/api.go
generated
vendored
Normal file
323
vendor/github.com/armon/consul-api/api.go
generated
vendored
Normal file
@ -0,0 +1,323 @@
|
|||||||
|
package consulapi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// QueryOptions are used to parameterize a query.
type QueryOptions struct {
	// Providing a datacenter overwrites the DC provided
	// by the Config
	Datacenter string

	// AllowStale allows any Consul server (non-leader) to service
	// a read. This allows for lower latency and higher throughput
	AllowStale bool

	// RequireConsistent forces the read to be fully consistent.
	// This is more expensive but prevents ever performing a stale
	// read.
	RequireConsistent bool

	// WaitIndex is used to enable a blocking query. Waits
	// until the timeout or the next index is reached
	WaitIndex uint64

	// WaitTime is used to bound the duration of a wait.
	// Defaults to that of the Config, but can be overridden.
	WaitTime time.Duration

	// Token is used to provide a per-request ACL token
	// which overrides the agent's default token.
	Token string
}

// WriteOptions are used to parameterize a write.
type WriteOptions struct {
	// Providing a datacenter overwrites the DC provided
	// by the Config
	Datacenter string

	// Token is used to provide a per-request ACL token
	// which overrides the agent's default token.
	Token string
}

// QueryMeta is used to return meta data about a query.
type QueryMeta struct {
	// LastIndex. This can be used as a WaitIndex to perform
	// a blocking query
	LastIndex uint64

	// Time of last contact from the leader for the
	// server servicing the request
	LastContact time.Duration

	// Is there a known leader
	KnownLeader bool

	// How long did the request take
	RequestTime time.Duration
}

// WriteMeta is used to return meta data about a write.
type WriteMeta struct {
	// How long did the request take
	RequestTime time.Duration
}

// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication.
type HttpBasicAuth struct {
	// Username to use for HTTP Basic Authentication
	Username string

	// Password to use for HTTP Basic Authentication
	Password string
}

// Config is used to configure the creation of a client.
type Config struct {
	// Address is the address of the Consul server
	Address string

	// Scheme is the URI scheme for the Consul server
	Scheme string

	// Datacenter to use. If not provided, the default agent datacenter is used.
	Datacenter string

	// HttpClient is the client to use. Default will be
	// used if not provided.
	HttpClient *http.Client

	// HttpAuth is the auth info to use for http access.
	HttpAuth *HttpBasicAuth

	// WaitTime limits how long a Watch will block. If not provided,
	// the agent default values will be used.
	WaitTime time.Duration

	// Token is used to provide a per-request ACL token
	// which overrides the agent's default token.
	Token string
}
|
||||||
|
|
||||||
|
// DefaultConfig returns a default configuration for the client
|
||||||
|
func DefaultConfig() *Config {
|
||||||
|
return &Config{
|
||||||
|
Address: "127.0.0.1:8500",
|
||||||
|
Scheme: "http",
|
||||||
|
HttpClient: http.DefaultClient,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client provides a client to the Consul API.
type Client struct {
	config Config // resolved configuration, copied from the caller in NewClient
}
|
||||||
|
|
||||||
|
// NewClient returns a new client
|
||||||
|
func NewClient(config *Config) (*Client, error) {
|
||||||
|
// bootstrap the config
|
||||||
|
defConfig := DefaultConfig()
|
||||||
|
|
||||||
|
if len(config.Address) == 0 {
|
||||||
|
config.Address = defConfig.Address
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(config.Scheme) == 0 {
|
||||||
|
config.Scheme = defConfig.Scheme
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.HttpClient == nil {
|
||||||
|
config.HttpClient = defConfig.HttpClient
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &Client{
|
||||||
|
config: *config,
|
||||||
|
}
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// request is used to help build up a request.
type request struct {
	config *Config     // client configuration the request is built from
	method string      // HTTP method, e.g. "GET" or "PUT"
	url    *url.URL    // target URL; RawQuery is filled from params in toHTTP
	params url.Values  // query-string parameters
	body   io.Reader   // explicit request body, if any
	obj    interface{} // object to JSON-encode as the body when body is nil
}
|
||||||
|
|
||||||
|
// setQueryOptions is used to annotate the request with
|
||||||
|
// additional query options
|
||||||
|
func (r *request) setQueryOptions(q *QueryOptions) {
|
||||||
|
if q == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if q.Datacenter != "" {
|
||||||
|
r.params.Set("dc", q.Datacenter)
|
||||||
|
}
|
||||||
|
if q.AllowStale {
|
||||||
|
r.params.Set("stale", "")
|
||||||
|
}
|
||||||
|
if q.RequireConsistent {
|
||||||
|
r.params.Set("consistent", "")
|
||||||
|
}
|
||||||
|
if q.WaitIndex != 0 {
|
||||||
|
r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
|
||||||
|
}
|
||||||
|
if q.WaitTime != 0 {
|
||||||
|
r.params.Set("wait", durToMsec(q.WaitTime))
|
||||||
|
}
|
||||||
|
if q.Token != "" {
|
||||||
|
r.params.Set("token", q.Token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// durToMsec converts a duration to a millisecond specified string
|
||||||
|
func durToMsec(dur time.Duration) string {
|
||||||
|
return fmt.Sprintf("%dms", dur/time.Millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
// setWriteOptions is used to annotate the request with
|
||||||
|
// additional write options
|
||||||
|
func (r *request) setWriteOptions(q *WriteOptions) {
|
||||||
|
if q == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if q.Datacenter != "" {
|
||||||
|
r.params.Set("dc", q.Datacenter)
|
||||||
|
}
|
||||||
|
if q.Token != "" {
|
||||||
|
r.params.Set("token", q.Token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// toHTTP converts the request to an HTTP request
|
||||||
|
func (r *request) toHTTP() (*http.Request, error) {
|
||||||
|
// Encode the query parameters
|
||||||
|
r.url.RawQuery = r.params.Encode()
|
||||||
|
|
||||||
|
// Get the url sring
|
||||||
|
urlRaw := r.url.String()
|
||||||
|
|
||||||
|
// Check if we should encode the body
|
||||||
|
if r.body == nil && r.obj != nil {
|
||||||
|
if b, err := encodeBody(r.obj); err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else {
|
||||||
|
r.body = b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the HTTP request
|
||||||
|
req, err := http.NewRequest(r.method, urlRaw, r.body)
|
||||||
|
|
||||||
|
// Setup auth
|
||||||
|
if err == nil && r.config.HttpAuth != nil {
|
||||||
|
req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
|
||||||
|
}
|
||||||
|
|
||||||
|
return req, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// newRequest is used to create a new request
|
||||||
|
func (c *Client) newRequest(method, path string) *request {
|
||||||
|
r := &request{
|
||||||
|
config: &c.config,
|
||||||
|
method: method,
|
||||||
|
url: &url.URL{
|
||||||
|
Scheme: c.config.Scheme,
|
||||||
|
Host: c.config.Address,
|
||||||
|
Path: path,
|
||||||
|
},
|
||||||
|
params: make(map[string][]string),
|
||||||
|
}
|
||||||
|
if c.config.Datacenter != "" {
|
||||||
|
r.params.Set("dc", c.config.Datacenter)
|
||||||
|
}
|
||||||
|
if c.config.WaitTime != 0 {
|
||||||
|
r.params.Set("wait", durToMsec(r.config.WaitTime))
|
||||||
|
}
|
||||||
|
if c.config.Token != "" {
|
||||||
|
r.params.Set("token", r.config.Token)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// doRequest runs a request with our client
|
||||||
|
func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
|
||||||
|
req, err := r.toHTTP()
|
||||||
|
if err != nil {
|
||||||
|
return 0, nil, err
|
||||||
|
}
|
||||||
|
start := time.Now()
|
||||||
|
resp, err := c.config.HttpClient.Do(req)
|
||||||
|
diff := time.Now().Sub(start)
|
||||||
|
return diff, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseQueryMeta is used to help parse query meta-data
|
||||||
|
func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
|
||||||
|
header := resp.Header
|
||||||
|
|
||||||
|
// Parse the X-Consul-Index
|
||||||
|
index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
|
||||||
|
}
|
||||||
|
q.LastIndex = index
|
||||||
|
|
||||||
|
// Parse the X-Consul-LastContact
|
||||||
|
last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
|
||||||
|
}
|
||||||
|
q.LastContact = time.Duration(last) * time.Millisecond
|
||||||
|
|
||||||
|
// Parse the X-Consul-KnownLeader
|
||||||
|
switch header.Get("X-Consul-KnownLeader") {
|
||||||
|
case "true":
|
||||||
|
q.KnownLeader = true
|
||||||
|
default:
|
||||||
|
q.KnownLeader = false
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeBody is used to JSON decode a body
|
||||||
|
func decodeBody(resp *http.Response, out interface{}) error {
|
||||||
|
dec := json.NewDecoder(resp.Body)
|
||||||
|
return dec.Decode(out)
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeBody is used to encode a request body
|
||||||
|
func encodeBody(obj interface{}) (io.Reader, error) {
|
||||||
|
buf := bytes.NewBuffer(nil)
|
||||||
|
enc := json.NewEncoder(buf)
|
||||||
|
if err := enc.Encode(obj); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// requireOK is used to wrap doRequest and check for a 200
|
||||||
|
func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
|
||||||
|
if e != nil {
|
||||||
|
return d, resp, e
|
||||||
|
}
|
||||||
|
if resp.StatusCode != 200 {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
io.Copy(&buf, resp.Body)
|
||||||
|
return d, resp, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
|
||||||
|
}
|
||||||
|
return d, resp, e
|
||||||
|
}
|
181
vendor/github.com/armon/consul-api/catalog.go
generated
vendored
Normal file
181
vendor/github.com/armon/consul-api/catalog.go
generated
vendored
Normal file
@ -0,0 +1,181 @@
|
|||||||
|
package consulapi
|
||||||
|
|
||||||
|
// Node represents an entry in the Consul catalog.
type Node struct {
	Node    string // node name
	Address string // node address
}

// CatalogService is a flattened node+service record returned by the
// catalog service endpoint.
type CatalogService struct {
	Node        string
	Address     string
	ServiceID   string
	ServiceName string
	ServiceTags []string
	ServicePort int
}

// CatalogNode pairs a node with the services registered on it,
// keyed by service ID.
type CatalogNode struct {
	Node     *Node
	Services map[string]*AgentService
}

// CatalogRegistration is the payload for Catalog.Register.
type CatalogRegistration struct {
	Node       string
	Address    string
	Datacenter string
	Service    *AgentService // optional service to register
	Check      *AgentCheck   // optional check to register
}

// CatalogDeregistration is the payload for Catalog.Deregister.
type CatalogDeregistration struct {
	Node       string
	Address    string
	Datacenter string
	ServiceID  string // optional: remove only this service
	CheckID    string // optional: remove only this check
}

// Catalog can be used to query the Catalog endpoints.
type Catalog struct {
	c *Client // owning client used to issue requests
}
|
||||||
|
|
||||||
|
// Catalog returns a handle to the catalog endpoints
|
||||||
|
func (c *Client) Catalog() *Catalog {
|
||||||
|
return &Catalog{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := c.c.newRequest("PUT", "/v1/catalog/register")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = reg
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{}
|
||||||
|
wm.RequestTime = rtt
|
||||||
|
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := c.c.newRequest("PUT", "/v1/catalog/deregister")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = dereg
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{}
|
||||||
|
wm.RequestTime = rtt
|
||||||
|
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Datacenters is used to query for all the known datacenters
|
||||||
|
func (c *Catalog) Datacenters() ([]string, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/catalog/datacenters")
|
||||||
|
_, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out []string
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nodes is used to query all the known nodes
|
||||||
|
func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/catalog/nodes")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out []*Node
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Services is used to query for all known services
|
||||||
|
func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/catalog/services")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out map[string][]string
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Service is used to query catalog entries for a given service
|
||||||
|
func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
if tag != "" {
|
||||||
|
r.params.Set("tag", tag)
|
||||||
|
}
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out []*CatalogService
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Node is used to query for service information about a single node
|
||||||
|
func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out *CatalogNode
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
104
vendor/github.com/armon/consul-api/event.go
generated
vendored
Normal file
104
vendor/github.com/armon/consul-api/event.go
generated
vendored
Normal file
@ -0,0 +1,104 @@
|
|||||||
|
package consulapi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Event can be used to query the Event endpoints.
type Event struct {
	c *Client // owning client used to issue requests
}

// UserEvent represents an event that was fired by the user.
type UserEvent struct {
	ID            string // server-assigned ID; convertible to a WaitIndex via IDToIndex
	Name          string // event name used in the fire/list endpoints
	Payload       []byte // opaque payload sent as the request body
	NodeFilter    string // optional node filter ("node" parameter)
	ServiceFilter string // optional service filter ("service" parameter)
	TagFilter     string // optional tag filter ("tag" parameter)
	Version       int
	LTime         uint64
}
|
||||||
|
|
||||||
|
// Event returns a handle to the event endpoints
|
||||||
|
func (c *Client) Event() *Event {
|
||||||
|
return &Event{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fire is used to fire a new user event. Only the Name, Payload and Filters
|
||||||
|
// are respected. This returns the ID or an associated error. Cross DC requests
|
||||||
|
// are supported.
|
||||||
|
func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
if params.NodeFilter != "" {
|
||||||
|
r.params.Set("node", params.NodeFilter)
|
||||||
|
}
|
||||||
|
if params.ServiceFilter != "" {
|
||||||
|
r.params.Set("service", params.ServiceFilter)
|
||||||
|
}
|
||||||
|
if params.TagFilter != "" {
|
||||||
|
r.params.Set("tag", params.TagFilter)
|
||||||
|
}
|
||||||
|
if params.Payload != nil {
|
||||||
|
r.body = bytes.NewReader(params.Payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
rtt, resp, err := requireOK(e.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
var out UserEvent
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
return out.ID, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List is used to get the most recent events an agent has received.
|
||||||
|
// This list can be optionally filtered by the name. This endpoint supports
|
||||||
|
// quasi-blocking queries. The index is not monotonic, nor does it provide provide
|
||||||
|
// LastContact or KnownLeader.
|
||||||
|
func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
|
||||||
|
r := e.c.newRequest("GET", "/v1/event/list")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
if name != "" {
|
||||||
|
r.params.Set("name", name)
|
||||||
|
}
|
||||||
|
rtt, resp, err := requireOK(e.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var entries []*UserEvent
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return entries, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDToIndex is a bit of a hack. This simulates the index generation to
|
||||||
|
// convert an event ID into a WaitIndex.
|
||||||
|
func (e *Event) IDToIndex(uuid string) uint64 {
|
||||||
|
lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
|
||||||
|
upper := uuid[19:23] + uuid[24:36]
|
||||||
|
lowVal, err := strconv.ParseUint(lower, 16, 64)
|
||||||
|
if err != nil {
|
||||||
|
panic("Failed to convert " + lower)
|
||||||
|
}
|
||||||
|
highVal, err := strconv.ParseUint(upper, 16, 64)
|
||||||
|
if err != nil {
|
||||||
|
panic("Failed to convert " + upper)
|
||||||
|
}
|
||||||
|
return lowVal ^ highVal
|
||||||
|
}
|
136
vendor/github.com/armon/consul-api/health.go
generated
vendored
Normal file
136
vendor/github.com/armon/consul-api/health.go
generated
vendored
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
package consulapi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HealthCheck is used to represent a single check.
type HealthCheck struct {
	Node        string // node the check is registered on
	CheckID     string
	Name        string
	Status      string // check state, e.g. "passing"/"warning"/"critical" (see Health.State)
	Notes       string
	Output      string
	ServiceID   string // associated service, if any
	ServiceName string
}

// ServiceEntry is used for the health service endpoint; it bundles a
// node, a service on that node, and the checks covering both.
type ServiceEntry struct {
	Node    *Node
	Service *AgentService
	Checks  []*HealthCheck
}

// Health can be used to query the Health endpoints.
type Health struct {
	c *Client // owning client used to issue requests
}
|
||||||
|
|
||||||
|
// Health returns a handle to the health endpoints
|
||||||
|
func (c *Client) Health() *Health {
|
||||||
|
return &Health{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Node is used to query for checks belonging to a given node
|
||||||
|
func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
|
||||||
|
r := h.c.newRequest("GET", "/v1/health/node/"+node)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(h.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out []*HealthCheck
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checks is used to return the checks associated with a service
|
||||||
|
func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
|
||||||
|
r := h.c.newRequest("GET", "/v1/health/checks/"+service)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(h.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out []*HealthCheck
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Service is used to query health information along with service info
|
||||||
|
// for a given service. It can optionally do server-side filtering on a tag
|
||||||
|
// or nodes with passing health checks only.
|
||||||
|
func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
|
||||||
|
r := h.c.newRequest("GET", "/v1/health/service/"+service)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
if tag != "" {
|
||||||
|
r.params.Set("tag", tag)
|
||||||
|
}
|
||||||
|
if passingOnly {
|
||||||
|
r.params.Set("passing", "1")
|
||||||
|
}
|
||||||
|
rtt, resp, err := requireOK(h.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out []*ServiceEntry
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// State is used to retrieve all the checks in a given state.
|
||||||
|
// The wildcard "any" state can also be used for all checks.
|
||||||
|
func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
|
||||||
|
switch state {
|
||||||
|
case "any":
|
||||||
|
case "warning":
|
||||||
|
case "critical":
|
||||||
|
case "passing":
|
||||||
|
case "unknown":
|
||||||
|
default:
|
||||||
|
return nil, nil, fmt.Errorf("Unsupported state: %v", state)
|
||||||
|
}
|
||||||
|
r := h.c.newRequest("GET", "/v1/health/state/"+state)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(h.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out []*HealthCheck
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
219
vendor/github.com/armon/consul-api/kv.go
generated
vendored
Normal file
219
vendor/github.com/armon/consul-api/kv.go
generated
vendored
Normal file
@ -0,0 +1,219 @@
|
|||||||
|
package consulapi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// KVPair is used to represent a single K/V entry.
type KVPair struct {
	Key         string // full key path
	CreateIndex uint64 // index metadata maintained by Consul
	ModifyIndex uint64 // last-modified index; sent as the "cas" parameter by CAS
	LockIndex   uint64
	Flags       uint64 // opaque user flags; sent as the "flags" parameter on writes
	Value       []byte // raw value bytes
	Session     string // session ID used by the Acquire/Release lock operations
}

// KVPairs is a list of KVPair objects.
type KVPairs []*KVPair

// KV is used to manipulate the K/V API.
type KV struct {
	c *Client // owning client used to issue requests
}
|
||||||
|
|
||||||
|
// KV is used to return a handle to the K/V apis
|
||||||
|
func (c *Client) KV() *KV {
|
||||||
|
return &KV{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get is used to lookup a single key
|
||||||
|
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
|
||||||
|
resp, qm, err := k.getInternal(key, nil, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if resp == nil {
|
||||||
|
return nil, qm, nil
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var entries []*KVPair
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(entries) > 0 {
|
||||||
|
return entries[0], qm, nil
|
||||||
|
}
|
||||||
|
return nil, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List is used to lookup all keys under a prefix
|
||||||
|
func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
|
||||||
|
resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if resp == nil {
|
||||||
|
return nil, qm, nil
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var entries []*KVPair
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return entries, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keys is used to list all the keys under a prefix. Optionally,
|
||||||
|
// a separator can be used to limit the responses.
|
||||||
|
func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
|
||||||
|
params := map[string]string{"keys": ""}
|
||||||
|
if separator != "" {
|
||||||
|
params["separator"] = separator
|
||||||
|
}
|
||||||
|
resp, qm, err := k.getInternal(prefix, params, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if resp == nil {
|
||||||
|
return nil, qm, nil
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var entries []string
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return entries, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
|
||||||
|
r := k.c.newRequest("GET", "/v1/kv/"+key)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
for param, val := range params {
|
||||||
|
r.params.Set(param, val)
|
||||||
|
}
|
||||||
|
rtt, resp, err := k.c.doRequest(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
if resp.StatusCode == 404 {
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil, qm, nil
|
||||||
|
} else if resp.StatusCode != 200 {
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
return resp, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put is used to write a new value. Only the
|
||||||
|
// Key, Flags and Value is respected.
|
||||||
|
func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
params := make(map[string]string, 1)
|
||||||
|
if p.Flags != 0 {
|
||||||
|
params["flags"] = strconv.FormatUint(p.Flags, 10)
|
||||||
|
}
|
||||||
|
_, wm, err := k.put(p.Key, params, p.Value, q)
|
||||||
|
return wm, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// CAS is used for a Check-And-Set operation. The Key,
|
||||||
|
// ModifyIndex, Flags and Value are respected. Returns true
|
||||||
|
// on success or false on failures.
|
||||||
|
func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||||
|
params := make(map[string]string, 2)
|
||||||
|
if p.Flags != 0 {
|
||||||
|
params["flags"] = strconv.FormatUint(p.Flags, 10)
|
||||||
|
}
|
||||||
|
params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
|
||||||
|
return k.put(p.Key, params, p.Value, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Acquire is used for a lock acquisiiton operation. The Key,
|
||||||
|
// Flags, Value and Session are respected. Returns true
|
||||||
|
// on success or false on failures.
|
||||||
|
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||||
|
params := make(map[string]string, 2)
|
||||||
|
if p.Flags != 0 {
|
||||||
|
params["flags"] = strconv.FormatUint(p.Flags, 10)
|
||||||
|
}
|
||||||
|
params["acquire"] = p.Session
|
||||||
|
return k.put(p.Key, params, p.Value, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Release is used for a lock release operation. The Key,
|
||||||
|
// Flags, Value and Session are respected. Returns true
|
||||||
|
// on success or false on failures.
|
||||||
|
func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||||
|
params := make(map[string]string, 2)
|
||||||
|
if p.Flags != 0 {
|
||||||
|
params["flags"] = strconv.FormatUint(p.Flags, 10)
|
||||||
|
}
|
||||||
|
params["release"] = p.Session
|
||||||
|
return k.put(p.Key, params, p.Value, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||||
|
r := k.c.newRequest("PUT", "/v1/kv/"+key)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
for param, val := range params {
|
||||||
|
r.params.Set(param, val)
|
||||||
|
}
|
||||||
|
r.body = bytes.NewReader(body)
|
||||||
|
rtt, resp, err := requireOK(k.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return false, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &WriteMeta{}
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if _, err := io.Copy(&buf, resp.Body); err != nil {
|
||||||
|
return false, nil, fmt.Errorf("Failed to read response: %v", err)
|
||||||
|
}
|
||||||
|
res := strings.Contains(string(buf.Bytes()), "true")
|
||||||
|
return res, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete is used to delete a single key
|
||||||
|
func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
|
||||||
|
return k.deleteInternal(key, nil, w)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteTree is used to delete all keys under a prefix
|
||||||
|
func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
|
||||||
|
return k.deleteInternal(prefix, []string{"recurse"}, w)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *KV) deleteInternal(key string, params []string, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := k.c.newRequest("DELETE", "/v1/kv/"+key)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
for _, param := range params {
|
||||||
|
r.params.Set(param, "")
|
||||||
|
}
|
||||||
|
rtt, resp, err := requireOK(k.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &WriteMeta{}
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
return qm, nil
|
||||||
|
}
|
204
vendor/github.com/armon/consul-api/session.go
generated
vendored
Normal file
204
vendor/github.com/armon/consul-api/session.go
generated
vendored
Normal file
@ -0,0 +1,204 @@
|
|||||||
|
package consulapi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SessionEntry represents a session in consul
|
||||||
|
type SessionEntry struct {
|
||||||
|
CreateIndex uint64
|
||||||
|
ID string
|
||||||
|
Name string
|
||||||
|
Node string
|
||||||
|
Checks []string
|
||||||
|
LockDelay time.Duration
|
||||||
|
Behavior string
|
||||||
|
TTL string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Session can be used to query the Session endpoints
|
||||||
|
type Session struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// Session returns a handle to the session endpoints
|
||||||
|
func (c *Client) Session() *Session {
|
||||||
|
return &Session{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateNoChecks is like Create but is used specifically to create
|
||||||
|
// a session with no associated health checks.
|
||||||
|
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
body := make(map[string]interface{})
|
||||||
|
body["Checks"] = []string{}
|
||||||
|
if se != nil {
|
||||||
|
if se.Name != "" {
|
||||||
|
body["Name"] = se.Name
|
||||||
|
}
|
||||||
|
if se.Node != "" {
|
||||||
|
body["Node"] = se.Node
|
||||||
|
}
|
||||||
|
if se.LockDelay != 0 {
|
||||||
|
body["LockDelay"] = durToMsec(se.LockDelay)
|
||||||
|
}
|
||||||
|
if se.Behavior != "" {
|
||||||
|
body["Behavior"] = se.Behavior
|
||||||
|
}
|
||||||
|
if se.TTL != "" {
|
||||||
|
body["TTL"] = se.TTL
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s.create(body, q)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create makes a new session. Providing a session entry can
|
||||||
|
// customize the session. It can also be nil to use defaults.
|
||||||
|
func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
var obj interface{}
|
||||||
|
if se != nil {
|
||||||
|
body := make(map[string]interface{})
|
||||||
|
obj = body
|
||||||
|
if se.Name != "" {
|
||||||
|
body["Name"] = se.Name
|
||||||
|
}
|
||||||
|
if se.Node != "" {
|
||||||
|
body["Node"] = se.Node
|
||||||
|
}
|
||||||
|
if se.LockDelay != 0 {
|
||||||
|
body["LockDelay"] = durToMsec(se.LockDelay)
|
||||||
|
}
|
||||||
|
if len(se.Checks) > 0 {
|
||||||
|
body["Checks"] = se.Checks
|
||||||
|
}
|
||||||
|
if se.Behavior != "" {
|
||||||
|
body["Behavior"] = se.Behavior
|
||||||
|
}
|
||||||
|
if se.TTL != "" {
|
||||||
|
body["TTL"] = se.TTL
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s.create(obj, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
r := s.c.newRequest("PUT", "/v1/session/create")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = obj
|
||||||
|
rtt, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
var out struct{ ID string }
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
return out.ID, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy invalides a given session
|
||||||
|
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := s.c.newRequest("PUT", "/v1/session/destroy/"+id)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
rtt, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Renew renews the TTL on a given session
|
||||||
|
func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
|
||||||
|
r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
rtt, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
|
||||||
|
var entries []*SessionEntry
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, wm, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(entries) > 0 {
|
||||||
|
return entries[0], wm, nil
|
||||||
|
}
|
||||||
|
return nil, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Info looks up a single session
|
||||||
|
func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
|
||||||
|
r := s.c.newRequest("GET", "/v1/session/info/"+id)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var entries []*SessionEntry
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(entries) > 0 {
|
||||||
|
return entries[0], qm, nil
|
||||||
|
}
|
||||||
|
return nil, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List gets sessions for a node
|
||||||
|
func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
|
||||||
|
r := s.c.newRequest("GET", "/v1/session/node/"+node)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var entries []*SessionEntry
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return entries, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List gets all active sessions
|
||||||
|
func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
|
||||||
|
r := s.c.newRequest("GET", "/v1/session/list")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var entries []*SessionEntry
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return entries, qm, nil
|
||||||
|
}
|
43
vendor/github.com/armon/consul-api/status.go
generated
vendored
Normal file
43
vendor/github.com/armon/consul-api/status.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
package consulapi
|
||||||
|
|
||||||
|
// Status can be used to query the Status endpoints
|
||||||
|
type Status struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status returns a handle to the status endpoints
|
||||||
|
func (c *Client) Status() *Status {
|
||||||
|
return &Status{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Leader is used to query for a known leader
|
||||||
|
func (s *Status) Leader() (string, error) {
|
||||||
|
r := s.c.newRequest("GET", "/v1/status/leader")
|
||||||
|
_, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var leader string
|
||||||
|
if err := decodeBody(resp, &leader); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return leader, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Peers is used to query for a known raft peers
|
||||||
|
func (s *Status) Peers() ([]string, error) {
|
||||||
|
r := s.c.newRequest("GET", "/v1/status/peers")
|
||||||
|
_, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var peers []string
|
||||||
|
if err := decodeBody(resp, &peers); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return peers, nil
|
||||||
|
}
|
202
vendor/github.com/coreos/etcd/client/LICENSE
generated
vendored
Normal file
202
vendor/github.com/coreos/etcd/client/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
236
vendor/github.com/coreos/etcd/client/auth_role.go
generated
vendored
Normal file
236
vendor/github.com/coreos/etcd/client/auth_role.go
generated
vendored
Normal file
@ -0,0 +1,236 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Role struct {
|
||||||
|
Role string `json:"role"`
|
||||||
|
Permissions Permissions `json:"permissions"`
|
||||||
|
Grant *Permissions `json:"grant,omitempty"`
|
||||||
|
Revoke *Permissions `json:"revoke,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Permissions struct {
|
||||||
|
KV rwPermission `json:"kv"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type rwPermission struct {
|
||||||
|
Read []string `json:"read"`
|
||||||
|
Write []string `json:"write"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PermissionType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
ReadPermission PermissionType = iota
|
||||||
|
WritePermission
|
||||||
|
ReadWritePermission
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
|
||||||
|
// interact with etcd's role creation and modification features.
|
||||||
|
func NewAuthRoleAPI(c Client) AuthRoleAPI {
|
||||||
|
return &httpAuthRoleAPI{
|
||||||
|
client: c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type AuthRoleAPI interface {
|
||||||
|
// AddRole adds a role.
|
||||||
|
AddRole(ctx context.Context, role string) error
|
||||||
|
|
||||||
|
// RemoveRole removes a role.
|
||||||
|
RemoveRole(ctx context.Context, role string) error
|
||||||
|
|
||||||
|
// GetRole retrieves role details.
|
||||||
|
GetRole(ctx context.Context, role string) (*Role, error)
|
||||||
|
|
||||||
|
// GrantRoleKV grants a role some permission prefixes for the KV store.
|
||||||
|
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
|
||||||
|
|
||||||
|
// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
|
||||||
|
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
|
||||||
|
|
||||||
|
// ListRoles lists roles.
|
||||||
|
ListRoles(ctx context.Context) ([]string, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpAuthRoleAPI struct {
|
||||||
|
client httpClient
|
||||||
|
}
|
||||||
|
|
||||||
|
type authRoleAPIAction struct {
|
||||||
|
verb string
|
||||||
|
name string
|
||||||
|
role *Role
|
||||||
|
}
|
||||||
|
|
||||||
|
type authRoleAPIList struct{}
|
||||||
|
|
||||||
|
func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2AuthURL(ep, "roles", "")
|
||||||
|
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2AuthURL(ep, "roles", l.name)
|
||||||
|
if l.role == nil {
|
||||||
|
req, _ := http.NewRequest(l.verb, u.String(), nil)
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
b, err := json.Marshal(l.role)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
body := bytes.NewReader(b)
|
||||||
|
req, _ := http.NewRequest(l.verb, u.String(), body)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
|
||||||
|
resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var roleList struct {
|
||||||
|
Roles []Role `json:"roles"`
|
||||||
|
}
|
||||||
|
if err = json.Unmarshal(body, &roleList); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ret := make([]string, 0, len(roleList.Roles))
|
||||||
|
for _, r := range roleList.Roles {
|
||||||
|
ret = append(ret, r.Role)
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
|
||||||
|
role := &Role{
|
||||||
|
Role: rolename,
|
||||||
|
}
|
||||||
|
return r.addRemoveRole(ctx, &authRoleAPIAction{
|
||||||
|
verb: "PUT",
|
||||||
|
name: rolename,
|
||||||
|
role: role,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
|
||||||
|
return r.addRemoveRole(ctx, &authRoleAPIAction{
|
||||||
|
verb: "DELETE",
|
||||||
|
name: rolename,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
|
||||||
|
resp, body, err := r.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||||
|
var sec authError
|
||||||
|
err := json.Unmarshal(body, &sec)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return sec
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
|
||||||
|
return r.modRole(ctx, &authRoleAPIAction{
|
||||||
|
verb: "GET",
|
||||||
|
name: rolename,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
|
||||||
|
var out rwPermission
|
||||||
|
switch permType {
|
||||||
|
case ReadPermission:
|
||||||
|
out.Read = prefixes
|
||||||
|
case WritePermission:
|
||||||
|
out.Write = prefixes
|
||||||
|
case ReadWritePermission:
|
||||||
|
out.Read = prefixes
|
||||||
|
out.Write = prefixes
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
|
||||||
|
rwp := buildRWPermission(prefixes, permType)
|
||||||
|
role := &Role{
|
||||||
|
Role: rolename,
|
||||||
|
Grant: &Permissions{
|
||||||
|
KV: rwp,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return r.modRole(ctx, &authRoleAPIAction{
|
||||||
|
verb: "PUT",
|
||||||
|
name: rolename,
|
||||||
|
role: role,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
|
||||||
|
rwp := buildRWPermission(prefixes, permType)
|
||||||
|
role := &Role{
|
||||||
|
Role: rolename,
|
||||||
|
Revoke: &Permissions{
|
||||||
|
KV: rwp,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return r.modRole(ctx, &authRoleAPIAction{
|
||||||
|
verb: "PUT",
|
||||||
|
name: rolename,
|
||||||
|
role: role,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
|
||||||
|
resp, body, err := r.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
|
var sec authError
|
||||||
|
err = json.Unmarshal(body, &sec)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return nil, sec
|
||||||
|
}
|
||||||
|
var role Role
|
||||||
|
if err = json.Unmarshal(body, &role); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &role, nil
|
||||||
|
}
|
319
vendor/github.com/coreos/etcd/client/auth_user.go
generated
vendored
Normal file
319
vendor/github.com/coreos/etcd/client/auth_user.go
generated
vendored
Normal file
@ -0,0 +1,319 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultV2AuthPrefix = "/v2/auth"
|
||||||
|
)
|
||||||
|
|
||||||
|
type User struct {
|
||||||
|
User string `json:"user"`
|
||||||
|
Password string `json:"password,omitempty"`
|
||||||
|
Roles []string `json:"roles"`
|
||||||
|
Grant []string `json:"grant,omitempty"`
|
||||||
|
Revoke []string `json:"revoke,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// userListEntry is the user representation given by the server for ListUsers
|
||||||
|
type userListEntry struct {
|
||||||
|
User string `json:"user"`
|
||||||
|
Roles []Role `json:"roles"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type UserRoles struct {
|
||||||
|
User string `json:"user"`
|
||||||
|
Roles []Role `json:"roles"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func v2AuthURL(ep url.URL, action string, name string) *url.URL {
|
||||||
|
if name != "" {
|
||||||
|
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
|
||||||
|
return &ep
|
||||||
|
}
|
||||||
|
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
|
||||||
|
return &ep
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAuthAPI constructs a new AuthAPI that uses HTTP to
|
||||||
|
// interact with etcd's general auth features.
|
||||||
|
func NewAuthAPI(c Client) AuthAPI {
|
||||||
|
return &httpAuthAPI{
|
||||||
|
client: c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type AuthAPI interface {
|
||||||
|
// Enable auth.
|
||||||
|
Enable(ctx context.Context) error
|
||||||
|
|
||||||
|
// Disable auth.
|
||||||
|
Disable(ctx context.Context) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpAuthAPI struct {
|
||||||
|
client httpClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpAuthAPI) Enable(ctx context.Context) error {
|
||||||
|
return s.enableDisable(ctx, &authAPIAction{"PUT"})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpAuthAPI) Disable(ctx context.Context) error {
|
||||||
|
return s.enableDisable(ctx, &authAPIAction{"DELETE"})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
|
||||||
|
resp, body, err := s.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||||
|
var sec authError
|
||||||
|
err = json.Unmarshal(body, &sec)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return sec
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type authAPIAction struct {
|
||||||
|
verb string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2AuthURL(ep, "enable", "")
|
||||||
|
req, _ := http.NewRequest(l.verb, u.String(), nil)
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
type authError struct {
|
||||||
|
Message string `json:"message"`
|
||||||
|
Code int `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e authError) Error() string {
|
||||||
|
return e.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
|
||||||
|
// interact with etcd's user creation and modification features.
|
||||||
|
func NewAuthUserAPI(c Client) AuthUserAPI {
|
||||||
|
return &httpAuthUserAPI{
|
||||||
|
client: c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type AuthUserAPI interface {
|
||||||
|
// AddUser adds a user.
|
||||||
|
AddUser(ctx context.Context, username string, password string) error
|
||||||
|
|
||||||
|
// RemoveUser removes a user.
|
||||||
|
RemoveUser(ctx context.Context, username string) error
|
||||||
|
|
||||||
|
// GetUser retrieves user details.
|
||||||
|
GetUser(ctx context.Context, username string) (*User, error)
|
||||||
|
|
||||||
|
// GrantUser grants a user some permission roles.
|
||||||
|
GrantUser(ctx context.Context, username string, roles []string) (*User, error)
|
||||||
|
|
||||||
|
// RevokeUser revokes some permission roles from a user.
|
||||||
|
RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
|
||||||
|
|
||||||
|
// ChangePassword changes the user's password.
|
||||||
|
ChangePassword(ctx context.Context, username string, password string) (*User, error)
|
||||||
|
|
||||||
|
// ListUsers lists the users.
|
||||||
|
ListUsers(ctx context.Context) ([]string, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpAuthUserAPI struct {
|
||||||
|
client httpClient
|
||||||
|
}
|
||||||
|
|
||||||
|
type authUserAPIAction struct {
|
||||||
|
verb string
|
||||||
|
username string
|
||||||
|
user *User
|
||||||
|
}
|
||||||
|
|
||||||
|
type authUserAPIList struct{}
|
||||||
|
|
||||||
|
func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2AuthURL(ep, "users", "")
|
||||||
|
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2AuthURL(ep, "users", l.username)
|
||||||
|
if l.user == nil {
|
||||||
|
req, _ := http.NewRequest(l.verb, u.String(), nil)
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
b, err := json.Marshal(l.user)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
body := bytes.NewReader(b)
|
||||||
|
req, _ := http.NewRequest(l.verb, u.String(), body)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
|
||||||
|
resp, body, err := u.client.Do(ctx, &authUserAPIList{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
|
var sec authError
|
||||||
|
err = json.Unmarshal(body, &sec)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return nil, sec
|
||||||
|
}
|
||||||
|
|
||||||
|
var userList struct {
|
||||||
|
Users []userListEntry `json:"users"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = json.Unmarshal(body, &userList); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ret := make([]string, 0, len(userList.Users))
|
||||||
|
for _, u := range userList.Users {
|
||||||
|
ret = append(ret, u.User)
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
|
||||||
|
user := &User{
|
||||||
|
User: username,
|
||||||
|
Password: password,
|
||||||
|
}
|
||||||
|
return u.addRemoveUser(ctx, &authUserAPIAction{
|
||||||
|
verb: "PUT",
|
||||||
|
username: username,
|
||||||
|
user: user,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
|
||||||
|
return u.addRemoveUser(ctx, &authUserAPIAction{
|
||||||
|
verb: "DELETE",
|
||||||
|
username: username,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
|
||||||
|
resp, body, err := u.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||||
|
var sec authError
|
||||||
|
err = json.Unmarshal(body, &sec)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return sec
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
|
||||||
|
return u.modUser(ctx, &authUserAPIAction{
|
||||||
|
verb: "GET",
|
||||||
|
username: username,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
|
||||||
|
user := &User{
|
||||||
|
User: username,
|
||||||
|
Grant: roles,
|
||||||
|
}
|
||||||
|
return u.modUser(ctx, &authUserAPIAction{
|
||||||
|
verb: "PUT",
|
||||||
|
username: username,
|
||||||
|
user: user,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
|
||||||
|
user := &User{
|
||||||
|
User: username,
|
||||||
|
Revoke: roles,
|
||||||
|
}
|
||||||
|
return u.modUser(ctx, &authUserAPIAction{
|
||||||
|
verb: "PUT",
|
||||||
|
username: username,
|
||||||
|
user: user,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
|
||||||
|
user := &User{
|
||||||
|
User: username,
|
||||||
|
Password: password,
|
||||||
|
}
|
||||||
|
return u.modUser(ctx, &authUserAPIAction{
|
||||||
|
verb: "PUT",
|
||||||
|
username: username,
|
||||||
|
user: user,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
|
||||||
|
resp, body, err := u.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
|
var sec authError
|
||||||
|
err = json.Unmarshal(body, &sec)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return nil, sec
|
||||||
|
}
|
||||||
|
var user User
|
||||||
|
if err = json.Unmarshal(body, &user); err != nil {
|
||||||
|
var userR UserRoles
|
||||||
|
if urerr := json.Unmarshal(body, &userR); urerr != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
user.User = userR.User
|
||||||
|
for _, r := range userR.Roles {
|
||||||
|
user.Roles = append(user.Roles, r.Role)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &user, nil
|
||||||
|
}
|
18
vendor/github.com/coreos/etcd/client/cancelreq.go
generated
vendored
Normal file
18
vendor/github.com/coreos/etcd/client/cancelreq.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// borrowed from golang/net/context/ctxhttp/cancelreq.go
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import "net/http"
|
||||||
|
|
||||||
|
func requestCanceler(tr CancelableTransport, req *http.Request) func() {
|
||||||
|
ch := make(chan struct{})
|
||||||
|
req.Cancel = ch
|
||||||
|
|
||||||
|
return func() {
|
||||||
|
close(ch)
|
||||||
|
}
|
||||||
|
}
|
710
vendor/github.com/coreos/etcd/client/client.go
generated
vendored
Normal file
710
vendor/github.com/coreos/etcd/client/client.go
generated
vendored
Normal file
@ -0,0 +1,710 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/version"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrNoEndpoints = errors.New("client: no endpoints available")
|
||||||
|
ErrTooManyRedirects = errors.New("client: too many redirects")
|
||||||
|
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
|
||||||
|
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
|
||||||
|
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
|
||||||
|
|
||||||
|
// oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
|
||||||
|
// that Do() will not retry a request
|
||||||
|
oneShotCtxValue interface{}
|
||||||
|
)
|
||||||
|
|
||||||
|
var DefaultRequestTimeout = 5 * time.Second
|
||||||
|
|
||||||
|
var DefaultTransport CancelableTransport = &http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
TLSHandshakeTimeout: 10 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
type EndpointSelectionMode int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// EndpointSelectionRandom is the default value of the 'SelectionMode'.
|
||||||
|
// As the name implies, the client object will pick a node from the members
|
||||||
|
// of the cluster in a random fashion. If the cluster has three members, A, B,
|
||||||
|
// and C, the client picks any node from its three members as its request
|
||||||
|
// destination.
|
||||||
|
EndpointSelectionRandom EndpointSelectionMode = iota
|
||||||
|
|
||||||
|
// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
|
||||||
|
// requests are sent directly to the cluster leader. This reduces
|
||||||
|
// forwarding roundtrips compared to making requests to etcd followers
|
||||||
|
// who then forward them to the cluster leader. In the event of a leader
|
||||||
|
// failure, however, clients configured this way cannot prioritize among
|
||||||
|
// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
|
||||||
|
// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
|
||||||
|
// maintain its knowledge of current cluster state.
|
||||||
|
//
|
||||||
|
// This mode should be used with Client.AutoSync().
|
||||||
|
EndpointSelectionPrioritizeLeader
|
||||||
|
)
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
// Endpoints defines a set of URLs (schemes, hosts and ports only)
|
||||||
|
// that can be used to communicate with a logical etcd cluster. For
|
||||||
|
// example, a three-node cluster could be provided like so:
|
||||||
|
//
|
||||||
|
// Endpoints: []string{
|
||||||
|
// "http://node1.example.com:2379",
|
||||||
|
// "http://node2.example.com:2379",
|
||||||
|
// "http://node3.example.com:2379",
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// If multiple endpoints are provided, the Client will attempt to
|
||||||
|
// use them all in the event that one or more of them are unusable.
|
||||||
|
//
|
||||||
|
// If Client.Sync is ever called, the Client may cache an alternate
|
||||||
|
// set of endpoints to continue operation.
|
||||||
|
Endpoints []string
|
||||||
|
|
||||||
|
// Transport is used by the Client to drive HTTP requests. If not
|
||||||
|
// provided, DefaultTransport will be used.
|
||||||
|
Transport CancelableTransport
|
||||||
|
|
||||||
|
// CheckRedirect specifies the policy for handling HTTP redirects.
|
||||||
|
// If CheckRedirect is not nil, the Client calls it before
|
||||||
|
// following an HTTP redirect. The sole argument is the number of
|
||||||
|
// requests that have already been made. If CheckRedirect returns
|
||||||
|
// an error, Client.Do will not make any further requests and return
|
||||||
|
// the error back it to the caller.
|
||||||
|
//
|
||||||
|
// If CheckRedirect is nil, the Client uses its default policy,
|
||||||
|
// which is to stop after 10 consecutive requests.
|
||||||
|
CheckRedirect CheckRedirectFunc
|
||||||
|
|
||||||
|
// Username specifies the user credential to add as an authorization header
|
||||||
|
Username string
|
||||||
|
|
||||||
|
// Password is the password for the specified user to add as an authorization header
|
||||||
|
// to the request.
|
||||||
|
Password string
|
||||||
|
|
||||||
|
// HeaderTimeoutPerRequest specifies the time limit to wait for response
|
||||||
|
// header in a single request made by the Client. The timeout includes
|
||||||
|
// connection time, any redirects, and header wait time.
|
||||||
|
//
|
||||||
|
// For non-watch GET request, server returns the response body immediately.
|
||||||
|
// For PUT/POST/DELETE request, server will attempt to commit request
|
||||||
|
// before responding, which is expected to take `100ms + 2 * RTT`.
|
||||||
|
// For watch request, server returns the header immediately to notify Client
|
||||||
|
// watch start. But if server is behind some kind of proxy, the response
|
||||||
|
// header may be cached at proxy, and Client cannot rely on this behavior.
|
||||||
|
//
|
||||||
|
// Especially, wait request will ignore this timeout.
|
||||||
|
//
|
||||||
|
// One API call may send multiple requests to different etcd servers until it
|
||||||
|
// succeeds. Use context of the API to specify the overall timeout.
|
||||||
|
//
|
||||||
|
// A HeaderTimeoutPerRequest of zero means no timeout.
|
||||||
|
HeaderTimeoutPerRequest time.Duration
|
||||||
|
|
||||||
|
// SelectionMode is an EndpointSelectionMode enum that specifies the
|
||||||
|
// policy for choosing the etcd cluster node to which requests are sent.
|
||||||
|
SelectionMode EndpointSelectionMode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cfg *Config) transport() CancelableTransport {
|
||||||
|
if cfg.Transport == nil {
|
||||||
|
return DefaultTransport
|
||||||
|
}
|
||||||
|
return cfg.Transport
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cfg *Config) checkRedirect() CheckRedirectFunc {
|
||||||
|
if cfg.CheckRedirect == nil {
|
||||||
|
return DefaultCheckRedirect
|
||||||
|
}
|
||||||
|
return cfg.CheckRedirect
|
||||||
|
}
|
||||||
|
|
||||||
|
// CancelableTransport mimics net/http.Transport, but requires that
|
||||||
|
// the object also support request cancellation.
|
||||||
|
type CancelableTransport interface {
|
||||||
|
http.RoundTripper
|
||||||
|
CancelRequest(req *http.Request)
|
||||||
|
}
|
||||||
|
|
||||||
|
type CheckRedirectFunc func(via int) error
|
||||||
|
|
||||||
|
// DefaultCheckRedirect follows up to 10 redirects, but no more.
|
||||||
|
var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
|
||||||
|
if via > 10 {
|
||||||
|
return ErrTooManyRedirects
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Client interface {
|
||||||
|
// Sync updates the internal cache of the etcd cluster's membership.
|
||||||
|
Sync(context.Context) error
|
||||||
|
|
||||||
|
// AutoSync periodically calls Sync() every given interval.
|
||||||
|
// The recommended sync interval is 10 seconds to 1 minute, which does
|
||||||
|
// not bring too much overhead to server and makes client catch up the
|
||||||
|
// cluster change in time.
|
||||||
|
//
|
||||||
|
// The example to use it:
|
||||||
|
//
|
||||||
|
// for {
|
||||||
|
// err := client.AutoSync(ctx, 10*time.Second)
|
||||||
|
// if err == context.DeadlineExceeded || err == context.Canceled {
|
||||||
|
// break
|
||||||
|
// }
|
||||||
|
// log.Print(err)
|
||||||
|
// }
|
||||||
|
AutoSync(context.Context, time.Duration) error
|
||||||
|
|
||||||
|
// Endpoints returns a copy of the current set of API endpoints used
|
||||||
|
// by Client to resolve HTTP requests. If Sync has ever been called,
|
||||||
|
// this may differ from the initial Endpoints provided in the Config.
|
||||||
|
Endpoints() []string
|
||||||
|
|
||||||
|
// SetEndpoints sets the set of API endpoints used by Client to resolve
|
||||||
|
// HTTP requests. If the given endpoints are not valid, an error will be
|
||||||
|
// returned
|
||||||
|
SetEndpoints(eps []string) error
|
||||||
|
|
||||||
|
// GetVersion retrieves the current etcd server and cluster version
|
||||||
|
GetVersion(ctx context.Context) (*version.Versions, error)
|
||||||
|
|
||||||
|
httpClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(cfg Config) (Client, error) {
|
||||||
|
c := &httpClusterClient{
|
||||||
|
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
|
||||||
|
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
|
||||||
|
selectionMode: cfg.SelectionMode,
|
||||||
|
}
|
||||||
|
if cfg.Username != "" {
|
||||||
|
c.credentials = &credentials{
|
||||||
|
username: cfg.Username,
|
||||||
|
password: cfg.Password,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := c.SetEndpoints(cfg.Endpoints); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpClient interface {
|
||||||
|
Do(context.Context, httpAction) (*http.Response, []byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
|
||||||
|
return func(ep url.URL) httpClient {
|
||||||
|
return &redirectFollowingHTTPClient{
|
||||||
|
checkRedirect: cr,
|
||||||
|
client: &simpleHTTPClient{
|
||||||
|
transport: tr,
|
||||||
|
endpoint: ep,
|
||||||
|
headerTimeout: headerTimeout,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type credentials struct {
|
||||||
|
username string
|
||||||
|
password string
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpClientFactory func(url.URL) httpClient
|
||||||
|
|
||||||
|
type httpAction interface {
|
||||||
|
HTTPRequest(url.URL) *http.Request
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpClusterClient struct {
|
||||||
|
clientFactory httpClientFactory
|
||||||
|
endpoints []url.URL
|
||||||
|
pinned int
|
||||||
|
credentials *credentials
|
||||||
|
sync.RWMutex
|
||||||
|
rand *rand.Rand
|
||||||
|
selectionMode EndpointSelectionMode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
|
||||||
|
ceps := make([]url.URL, len(eps))
|
||||||
|
copy(ceps, eps)
|
||||||
|
|
||||||
|
// To perform a lookup on the new endpoint list without using the current
|
||||||
|
// client, we'll copy it
|
||||||
|
clientCopy := &httpClusterClient{
|
||||||
|
clientFactory: c.clientFactory,
|
||||||
|
credentials: c.credentials,
|
||||||
|
rand: c.rand,
|
||||||
|
|
||||||
|
pinned: 0,
|
||||||
|
endpoints: ceps,
|
||||||
|
}
|
||||||
|
|
||||||
|
mAPI := NewMembersAPI(clientCopy)
|
||||||
|
leader, err := mAPI.Leader(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if len(leader.ClientURLs) == 0 {
|
||||||
|
return "", ErrNoLeaderEndpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
|
||||||
|
if len(eps) == 0 {
|
||||||
|
return []url.URL{}, ErrNoEndpoints
|
||||||
|
}
|
||||||
|
|
||||||
|
neps := make([]url.URL, len(eps))
|
||||||
|
for i, ep := range eps {
|
||||||
|
u, err := url.Parse(ep)
|
||||||
|
if err != nil {
|
||||||
|
return []url.URL{}, err
|
||||||
|
}
|
||||||
|
neps[i] = *u
|
||||||
|
}
|
||||||
|
return neps, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClusterClient) SetEndpoints(eps []string) error {
|
||||||
|
neps, err := c.parseEndpoints(eps)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Lock()
|
||||||
|
defer c.Unlock()
|
||||||
|
|
||||||
|
c.endpoints = shuffleEndpoints(c.rand, neps)
|
||||||
|
// We're not doing anything for PrioritizeLeader here. This is
|
||||||
|
// due to not having a context meaning we can't call getLeaderEndpoint
|
||||||
|
// However, if you're using PrioritizeLeader, you've already been told
|
||||||
|
// to regularly call sync, where we do have a ctx, and can figure the
|
||||||
|
// leader. PrioritizeLeader is also quite a loose guarantee, so deal
|
||||||
|
// with it
|
||||||
|
c.pinned = 0
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
|
||||||
|
action := act
|
||||||
|
c.RLock()
|
||||||
|
leps := len(c.endpoints)
|
||||||
|
eps := make([]url.URL, leps)
|
||||||
|
n := copy(eps, c.endpoints)
|
||||||
|
pinned := c.pinned
|
||||||
|
|
||||||
|
if c.credentials != nil {
|
||||||
|
action = &authedAction{
|
||||||
|
act: act,
|
||||||
|
credentials: *c.credentials,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.RUnlock()
|
||||||
|
|
||||||
|
if leps == 0 {
|
||||||
|
return nil, nil, ErrNoEndpoints
|
||||||
|
}
|
||||||
|
|
||||||
|
if leps != n {
|
||||||
|
return nil, nil, errors.New("unable to pick endpoint: copy failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp *http.Response
|
||||||
|
var body []byte
|
||||||
|
var err error
|
||||||
|
cerr := &ClusterError{}
|
||||||
|
isOneShot := ctx.Value(&oneShotCtxValue) != nil
|
||||||
|
|
||||||
|
for i := pinned; i < leps+pinned; i++ {
|
||||||
|
k := i % leps
|
||||||
|
hc := c.clientFactory(eps[k])
|
||||||
|
resp, body, err = hc.Do(ctx, action)
|
||||||
|
if err != nil {
|
||||||
|
cerr.Errors = append(cerr.Errors, err)
|
||||||
|
if err == ctx.Err() {
|
||||||
|
return nil, nil, ctx.Err()
|
||||||
|
}
|
||||||
|
if err == context.Canceled || err == context.DeadlineExceeded {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
} else if resp.StatusCode/100 == 5 {
|
||||||
|
switch resp.StatusCode {
|
||||||
|
case http.StatusInternalServerError, http.StatusServiceUnavailable:
|
||||||
|
// TODO: make sure this is a no leader response
|
||||||
|
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
|
||||||
|
default:
|
||||||
|
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
|
||||||
|
}
|
||||||
|
err = cerr.Errors[0]
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if !isOneShot {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c.Lock()
|
||||||
|
c.pinned = (k + 1) % leps
|
||||||
|
c.Unlock()
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if k != pinned {
|
||||||
|
c.Lock()
|
||||||
|
c.pinned = k
|
||||||
|
c.Unlock()
|
||||||
|
}
|
||||||
|
return resp, body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil, cerr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClusterClient) Endpoints() []string {
|
||||||
|
c.RLock()
|
||||||
|
defer c.RUnlock()
|
||||||
|
|
||||||
|
eps := make([]string, len(c.endpoints))
|
||||||
|
for i, ep := range c.endpoints {
|
||||||
|
eps[i] = ep.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
return eps
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClusterClient) Sync(ctx context.Context) error {
|
||||||
|
mAPI := NewMembersAPI(c)
|
||||||
|
ms, err := mAPI.List(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var eps []string
|
||||||
|
for _, m := range ms {
|
||||||
|
eps = append(eps, m.ClientURLs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
neps, err := c.parseEndpoints(eps)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
npin := 0
|
||||||
|
|
||||||
|
switch c.selectionMode {
|
||||||
|
case EndpointSelectionRandom:
|
||||||
|
c.RLock()
|
||||||
|
eq := endpointsEqual(c.endpoints, neps)
|
||||||
|
c.RUnlock()
|
||||||
|
|
||||||
|
if eq {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// When items in the endpoint list changes, we choose a new pin
|
||||||
|
neps = shuffleEndpoints(c.rand, neps)
|
||||||
|
case EndpointSelectionPrioritizeLeader:
|
||||||
|
nle, err := c.getLeaderEndpoint(ctx, neps)
|
||||||
|
if err != nil {
|
||||||
|
return ErrNoLeaderEndpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, n := range neps {
|
||||||
|
if n.String() == nle {
|
||||||
|
npin = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Lock()
|
||||||
|
defer c.Unlock()
|
||||||
|
c.endpoints = neps
|
||||||
|
c.pinned = npin
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
|
||||||
|
ticker := time.NewTicker(interval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
for {
|
||||||
|
err := c.Sync(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case <-ticker.C:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetVersion fetches the /version endpoint of the cluster and decodes the
// reported server/cluster versions. Any non-200 response body is decoded as
// an etcd Error and returned to the caller; undecodable bodies yield
// ErrInvalidJSON and an empty 200 body yields ErrEmptyBody.
func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
	act := &getAction{Prefix: "/version"}

	resp, body, err := c.Do(ctx, act)
	if err != nil {
		return nil, err
	}

	switch resp.StatusCode {
	case http.StatusOK:
		if len(body) == 0 {
			return nil, ErrEmptyBody
		}
		var vresp version.Versions
		if err := json.Unmarshal(body, &vresp); err != nil {
			return nil, ErrInvalidJSON
		}
		return &vresp, nil
	default:
		// The server reports errors as a JSON-encoded Error struct.
		var etcdErr Error
		if err := json.Unmarshal(body, &etcdErr); err != nil {
			return nil, ErrInvalidJSON
		}
		return nil, etcdErr
	}
}
|
||||||
|
|
||||||
|
// roundTripResponse carries the result of a single transport RoundTrip
// across the goroutine boundary in simpleHTTPClient.Do.
type roundTripResponse struct {
	resp *http.Response
	err  error
}
|
||||||
|
|
||||||
|
// simpleHTTPClient executes httpActions against exactly one endpoint.
// Higher-level wrappers (redirect following, cluster failover) compose
// around it.
type simpleHTTPClient struct {
	transport CancelableTransport
	endpoint  url.URL
	// headerTimeout bounds how long response headers may take to arrive;
	// it is skipped for long-polling ("wait") requests.
	headerTimeout time.Duration
}
|
||||||
|
|
||||||
|
// Do executes act against the client's single endpoint and returns the
// response together with its fully read body.
//
// The transport round trip runs in its own goroutine so it can be raced
// against cancellation: unless the request is an etcd "wait" (long-poll)
// request, headers must arrive within c.headerTimeout. Long-poll requests
// are exempt because they may legitimately block indefinitely.
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)

	// Optionally dump the request as an equivalent cURL command (debugging).
	if err := printcURL(req); err != nil {
		return nil, nil, err
	}

	// Detect long-polling requests via the "wait" query parameter.
	isWait := false
	if req != nil && req.URL != nil {
		ws := req.URL.Query().Get("wait")
		if len(ws) != 0 {
			var err error
			isWait, err = strconv.ParseBool(ws)
			if err != nil {
				return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
			}
		}
	}

	// hctx governs the header-arrival phase only; it layers the header
	// timeout (when applicable) on top of the caller's ctx.
	var hctx context.Context
	var hcancel context.CancelFunc
	if !isWait && c.headerTimeout > 0 {
		hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
	} else {
		hctx, hcancel = context.WithCancel(ctx)
	}
	defer hcancel()

	reqcancel := requestCanceler(c.transport, req)

	// Buffered so the goroutine can always complete its send even if the
	// select below has already taken the hctx.Done() branch.
	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-hctx.Done():
		// cancel and wait for request to actually exit before continuing
		reqcancel()
		rtresp := <-rtchan
		resp = rtresp.resp
		// Distinguish caller cancellation from the header timeout; ctx is
		// checked first since hctx is derived from it.
		switch {
		case ctx.Err() != nil:
			err = ctx.Err()
		case hctx.Err() != nil:
			err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
		default:
			panic("failed to get error from context")
		}
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	// Read the body in a goroutine so a pending read can be abandoned as
	// soon as ctx is cancelled.
	var body []byte
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(resp.Body)
		done <- struct{}{}
	}()

	select {
	case <-ctx.Done():
		// Closing the body unblocks the ReadAll above; wait for it so the
		// goroutine cannot outlive this call.
		resp.Body.Close()
		<-done
		return nil, nil, ctx.Err()
	case <-done:
	}

	return resp, body, err
}
|
||||||
|
|
||||||
|
// authedAction wraps an httpAction, stamping HTTP basic-auth credentials
// onto every request it builds.
type authedAction struct {
	act         httpAction
	credentials credentials
}

// HTTPRequest builds the wrapped action's request and attaches the stored
// username/password via basic auth.
func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
	r := a.act.HTTPRequest(url)
	r.SetBasicAuth(a.credentials.username, a.credentials.password)
	return r
}
|
||||||
|
|
||||||
|
// redirectFollowingHTTPClient decorates an httpClient with 3xx redirect
// handling; checkRedirect decides when to stop following.
type redirectFollowingHTTPClient struct {
	client        httpClient
	checkRedirect CheckRedirectFunc
}
|
||||||
|
|
||||||
|
// Do executes act, transparently following HTTP 3xx redirects by re-issuing
// the action against the Location target. checkRedirect is consulted before
// each follow-up request (i is the redirect count so far); after 100
// attempts errTooManyRedirectChecks is returned as a hard safety cap.
func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
	next := act
	for i := 0; i < 100; i++ {
		if i > 0 {
			// Only consult the policy once we are actually redirecting.
			if err := r.checkRedirect(i); err != nil {
				return nil, nil, err
			}
		}
		resp, body, err := r.client.Do(ctx, next)
		if err != nil {
			return nil, nil, err
		}
		if resp.StatusCode/100 == 3 {
			hdr := resp.Header.Get("Location")
			if hdr == "" {
				return nil, nil, fmt.Errorf("Location header not set")
			}
			loc, err := url.Parse(hdr)
			if err != nil {
				return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
			}
			// Re-run the ORIGINAL action, but force its URL to the
			// redirect target.
			next = &redirectedHTTPAction{
				action:   act,
				location: *loc,
			}
			continue
		}
		return resp, body, nil
	}

	return nil, nil, errTooManyRedirectChecks
}
|
||||||
|
|
||||||
|
// redirectedHTTPAction replays an existing action against a fixed redirect
// location instead of the endpoint the caller supplies.
type redirectedHTTPAction struct {
	action   httpAction
	location url.URL
}

// HTTPRequest builds the underlying action's request for ep, then rewrites
// its URL to the stored redirect location.
func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
	orig := r.action.HTTPRequest(ep)
	orig.URL = &r.location
	return orig
}
|
||||||
|
|
||||||
|
func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
|
||||||
|
// copied from Go 1.9<= rand.Rand.Perm
|
||||||
|
n := len(eps)
|
||||||
|
p := make([]int, n)
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
j := r.Intn(i + 1)
|
||||||
|
p[i] = p[j]
|
||||||
|
p[j] = i
|
||||||
|
}
|
||||||
|
neps := make([]url.URL, n)
|
||||||
|
for i, k := range p {
|
||||||
|
neps[i] = eps[k]
|
||||||
|
}
|
||||||
|
return neps
|
||||||
|
}
|
||||||
|
|
||||||
|
func endpointsEqual(left, right []url.URL) bool {
|
||||||
|
if len(left) != len(right) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
sLeft := make([]string, len(left))
|
||||||
|
sRight := make([]string, len(right))
|
||||||
|
for i, l := range left {
|
||||||
|
sLeft[i] = l.String()
|
||||||
|
}
|
||||||
|
for i, r := range right {
|
||||||
|
sRight[i] = r.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(sLeft)
|
||||||
|
sort.Strings(sRight)
|
||||||
|
for i := range sLeft {
|
||||||
|
if sLeft[i] != sRight[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
37
vendor/github.com/coreos/etcd/client/cluster_error.go
generated
vendored
Normal file
37
vendor/github.com/coreos/etcd/client/cluster_error.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// ClusterError aggregates the per-endpoint errors collected while trying
// every member of the cluster.
type ClusterError struct {
	Errors []error
}

// Error implements the error interface, prefixing the combined endpoint
// errors with the generic cluster-unavailable message.
func (ce *ClusterError) Error() string {
	s := ErrClusterUnavailable.Error()
	for i, e := range ce.Errors {
		s += fmt.Sprintf("; error #%d: %s\n", i, e)
	}
	return s
}

// Detail returns only the per-endpoint errors, one per line, without the
// cluster-unavailable prefix.
func (ce *ClusterError) Detail() string {
	s := ""
	for i, e := range ce.Errors {
		s += fmt.Sprintf("error #%d: %s\n", i, e)
	}
	return s
}
|
70
vendor/github.com/coreos/etcd/client/curl.go
generated
vendored
Normal file
70
vendor/github.com/coreos/etcd/client/curl.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// cURLDebug toggles printing of each outgoing request as an equivalent
	// cURL command (see printcURL). Package-level and not synchronized;
	// intended to be flipped before issuing requests.
	cURLDebug = false
)

// EnablecURLDebug turns on cURL-command debug output for all requests.
func EnablecURLDebug() {
	cURLDebug = true
}

// DisablecURLDebug turns off cURL-command debug output.
func DisablecURLDebug() {
	cURLDebug = false
}
|
||||||
|
|
||||||
|
// printcURL prints the cURL equivalent request to stderr.
|
||||||
|
// It returns an error if the body of the request cannot
|
||||||
|
// be read.
|
||||||
|
// The caller MUST cancel the request if there is an error.
|
||||||
|
func printcURL(req *http.Request) error {
|
||||||
|
if !cURLDebug {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
command string
|
||||||
|
b []byte
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
if req.URL != nil {
|
||||||
|
command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.Body != nil {
|
||||||
|
b, err = ioutil.ReadAll(req.Body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
command += fmt.Sprintf(" -d %q", string(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
|
||||||
|
|
||||||
|
// reset body
|
||||||
|
body := bytes.NewBuffer(b)
|
||||||
|
req.Body = ioutil.NopCloser(body)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
40
vendor/github.com/coreos/etcd/client/discover.go
generated
vendored
Normal file
40
vendor/github.com/coreos/etcd/client/discover.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/coreos/etcd/pkg/srv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Discoverer is an interface that wraps the Discover method.
type Discoverer interface {
	// Discover looks up the etcd servers for the domain.
	Discover(domain string) ([]string, error)
}

// srvDiscover resolves endpoints through DNS SRV lookups via pkg/srv.
type srvDiscover struct{}

// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
	return &srvDiscover{}
}

// Discover resolves the "etcd-client" SRV records for domain and returns
// the discovered client endpoints.
func (d *srvDiscover) Discover(domain string) ([]string, error) {
	srvs, err := srv.GetClient("etcd-client", domain)
	if err != nil {
		return nil, err
	}
	return srvs.Endpoints, nil
}
|
73
vendor/github.com/coreos/etcd/client/doc.go
generated
vendored
Normal file
73
vendor/github.com/coreos/etcd/client/doc.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package client provides bindings for the etcd APIs.
|
||||||
|
|
||||||
|
Create a Config and exchange it for a Client:
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
cfg := client.Config{
|
||||||
|
Endpoints: []string{"http://127.0.0.1:2379"},
|
||||||
|
Transport: DefaultTransport,
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := client.New(cfg)
|
||||||
|
if err != nil {
|
||||||
|
// handle error
|
||||||
|
}
|
||||||
|
|
||||||
|
Clients are safe for concurrent use by multiple goroutines.
|
||||||
|
|
||||||
|
Create a KeysAPI using the Client, then use it to interact with etcd:
|
||||||
|
|
||||||
|
kAPI := client.NewKeysAPI(c)
|
||||||
|
|
||||||
|
// create a new key /foo with the value "bar"
|
||||||
|
_, err = kAPI.Create(context.Background(), "/foo", "bar")
|
||||||
|
if err != nil {
|
||||||
|
// handle error
|
||||||
|
}
|
||||||
|
|
||||||
|
// delete the newly created key only if the value is still "bar"
|
||||||
|
_, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
|
||||||
|
if err != nil {
|
||||||
|
// handle error
|
||||||
|
}
|
||||||
|
|
||||||
|
Use a custom context to set timeouts on your operations:
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// set a new key, ignoring its previous state
|
||||||
|
_, err := kAPI.Set(ctx, "/ping", "pong", nil)
|
||||||
|
if err != nil {
|
||||||
|
if err == context.DeadlineExceeded {
|
||||||
|
// request took longer than 5s
|
||||||
|
} else {
|
||||||
|
// handle error
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
*/
|
||||||
|
package client
|
17
vendor/github.com/coreos/etcd/client/integration/doc.go
generated
vendored
Normal file
17
vendor/github.com/coreos/etcd/client/integration/doc.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package integration implements tests built upon embedded etcd, focusing on
|
||||||
|
// the correctness of the etcd v2 client.
|
||||||
|
package integration
|
5218
vendor/github.com/coreos/etcd/client/keys.generated.go
generated
vendored
Normal file
5218
vendor/github.com/coreos/etcd/client/keys.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
681
vendor/github.com/coreos/etcd/client/keys.go
generated
vendored
Normal file
681
vendor/github.com/coreos/etcd/client/keys.go
generated
vendored
Normal file
@ -0,0 +1,681 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/pkg/pathutil"
|
||||||
|
"github.com/ugorji/go/codec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// etcd v2 server error codes, mirrored from the server's error table so the
// client can match on Error.Code.
const (
	// Key-space command errors.
	ErrorCodeKeyNotFound  = 100
	ErrorCodeTestFailed   = 101
	ErrorCodeNotFile      = 102
	ErrorCodeNotDir       = 104
	ErrorCodeNodeExist    = 105
	ErrorCodeRootROnly    = 107
	ErrorCodeDirNotEmpty  = 108
	ErrorCodeUnauthorized = 110

	// Request validation errors.
	ErrorCodePrevValueRequired = 201
	ErrorCodeTTLNaN            = 202
	ErrorCodeIndexNaN          = 203
	ErrorCodeInvalidField      = 209
	ErrorCodeInvalidForm       = 210

	// Raft-layer errors.
	ErrorCodeRaftInternal = 300
	ErrorCodeLeaderElect  = 301

	// Watcher errors.
	ErrorCodeWatcherCleared    = 400
	ErrorCodeEventIndexCleared = 401
)
|
||||||
|
|
||||||
|
// Error is the JSON error payload returned by the etcd v2 HTTP API.
type Error struct {
	// Code is one of the ErrorCode* constants above.
	Code    int    `json:"errorCode"`
	Message string `json:"message"`
	Cause   string `json:"cause"`
	// Index is the etcd index at which the error was generated.
	Index uint64 `json:"index"`
}

// Error implements the error interface, rendering code, message, cause and
// index in a single line.
func (e Error) Error() string {
	return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
}
|
||||||
|
|
||||||
|
var (
	// ErrInvalidJSON is returned when a response body cannot be decoded,
	// which usually means the endpoint is not actually an etcd server.
	ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.")
	// ErrEmptyBody is returned when a successful response unexpectedly
	// carries no body.
	ErrEmptyBody = errors.New("client: response body is empty")
)

// PrevExistType is used to define an existence condition when setting
// or deleting Nodes.
type PrevExistType string

const (
	PrevIgnore  = PrevExistType("")
	PrevExist   = PrevExistType("true")
	PrevNoExist = PrevExistType("false")
)

var (
	// defaultV2KeysPrefix is the base URL path of the v2 keys API.
	defaultV2KeysPrefix = "/v2/keys"
)
|
||||||
|
|
||||||
|
// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
// API over HTTP.
func NewKeysAPI(c Client) KeysAPI {
	return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
}

// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
// to provide a custom base URL path. This should only be used in
// very rare cases.
func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
	return &httpKeysAPI{
		client: c,
		prefix: p,
	}
}
|
||||||
|
|
||||||
|
// KeysAPI is the client-side surface of etcd's v2 key-value HTTP API.
type KeysAPI interface {
	// Get retrieves a set of Nodes from etcd
	Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)

	// Set assigns a new value to a Node identified by a given key. The caller
	// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
	// then value is ignored.
	Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)

	// Delete removes a Node identified by the given key, optionally destroying
	// all of its children as well. The caller may define a set of required
	// conditions in an DeleteOptions object.
	Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)

	// Create is an alias for Set w/ PrevExist=false
	Create(ctx context.Context, key, value string) (*Response, error)

	// CreateInOrder is used to atomically create in-order keys within the given directory.
	CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)

	// Update is an alias for Set w/ PrevExist=true
	Update(ctx context.Context, key, value string) (*Response, error)

	// Watcher builds a new Watcher targeted at a specific Node identified
	// by the given key. The Watcher may be configured at creation time
	// through a WatcherOptions object. The returned Watcher is designed
	// to emit events that happen to a Node, and optionally to its children.
	Watcher(key string, opts *WatcherOptions) Watcher
}
|
||||||
|
|
||||||
|
// WatcherOptions configures the behavior of a Watcher created through
// KeysAPI.Watcher.
type WatcherOptions struct {
	// AfterIndex defines the index after-which the Watcher should
	// start emitting events. For example, if a value of 5 is
	// provided, the first event will have an index >= 6.
	//
	// Setting AfterIndex to 0 (default) means that the Watcher
	// should start watching for events starting at the current
	// index, whatever that may be.
	AfterIndex uint64

	// Recursive specifies whether or not the Watcher should emit
	// events that occur in children of the given keyspace. If set
	// to false (default), events will be limited to those that
	// occur for the exact key.
	Recursive bool
}

// CreateInOrderOptions configures KeysAPI.CreateInOrder.
type CreateInOrderOptions struct {
	// TTL defines a period of time after-which the Node should
	// expire and no longer exist. Values <= 0 are ignored. Given
	// that the zero-value is ignored, TTL cannot be used to set
	// a TTL of 0.
	TTL time.Duration
}
|
||||||
|
|
||||||
|
// SetOptions expresses the preconditions and behavior flags for
// KeysAPI.Set.
type SetOptions struct {
	// PrevValue specifies what the current value of the Node must
	// be in order for the Set operation to succeed.
	//
	// Leaving this field empty means that the caller wishes to
	// ignore the current value of the Node. This cannot be used
	// to compare the Node's current value to an empty string.
	//
	// PrevValue is ignored if Dir=true
	PrevValue string

	// PrevIndex indicates what the current ModifiedIndex of the
	// Node must be in order for the Set operation to succeed.
	//
	// If PrevIndex is set to 0 (default), no comparison is made.
	PrevIndex uint64

	// PrevExist specifies whether the Node must currently exist
	// (PrevExist) or not (PrevNoExist). If the caller does not
	// care about existence, set PrevExist to PrevIgnore, or simply
	// leave it unset.
	PrevExist PrevExistType

	// TTL defines a period of time after-which the Node should
	// expire and no longer exist. Values <= 0 are ignored. Given
	// that the zero-value is ignored, TTL cannot be used to set
	// a TTL of 0.
	TTL time.Duration

	// Refresh set to true means a TTL value can be updated
	// without firing a watch or changing the node value. A
	// value must not be provided when refreshing a key.
	Refresh bool

	// Dir specifies whether or not this Node should be created as a directory.
	Dir bool

	// NoValueOnSuccess specifies whether the response contains the current value of the Node.
	// If set, the response will only contain the current value when the request fails.
	NoValueOnSuccess bool
}
|
||||||
|
|
||||||
|
// GetOptions configures KeysAPI.Get.
type GetOptions struct {
	// Recursive defines whether or not all children of the Node
	// should be returned.
	Recursive bool

	// Sort instructs the server whether or not to sort the Nodes.
	// If true, the Nodes are sorted alphabetically by key in
	// ascending order (A to z). If false (default), the Nodes will
	// not be sorted and the ordering used should not be considered
	// predictable.
	Sort bool

	// Quorum specifies whether it gets the latest committed value that
	// has been applied in quorum of members, which ensures external
	// consistency (or linearizability).
	Quorum bool
}

// DeleteOptions expresses the preconditions and behavior flags for
// KeysAPI.Delete.
type DeleteOptions struct {
	// PrevValue specifies what the current value of the Node must
	// be in order for the Delete operation to succeed.
	//
	// Leaving this field empty means that the caller wishes to
	// ignore the current value of the Node. This cannot be used
	// to compare the Node's current value to an empty string.
	PrevValue string

	// PrevIndex indicates what the current ModifiedIndex of the
	// Node must be in order for the Delete operation to succeed.
	//
	// If PrevIndex is set to 0 (default), no comparison is made.
	PrevIndex uint64

	// Recursive defines whether or not all children of the Node
	// should be deleted. If set to true, all children of the Node
	// identified by the given key will be deleted. If left unset
	// or explicitly set to false, only a single Node will be
	// deleted.
	Recursive bool

	// Dir specifies whether or not this Node should be removed as a directory.
	Dir bool
}
|
||||||
|
|
||||||
|
// Watcher observes a single key (and optionally its children) for changes.
type Watcher interface {
	// Next blocks until an etcd event occurs, then returns a Response
	// representing that event. The behavior of Next depends on the
	// WatcherOptions used to construct the Watcher. Next is designed to
	// be called repeatedly, each time blocking until a subsequent event
	// is available.
	//
	// If the provided context is cancelled, Next will return a non-nil
	// error. Any other failures encountered while waiting for the next
	// event (connection issues, deserialization failures, etc) will
	// also result in a non-nil error.
	Next(context.Context) (*Response, error)
}
|
||||||
|
|
||||||
|
// Response describes the outcome of a single v2 keys-API operation.
type Response struct {
	// Action is the name of the operation that occurred. Possible values
	// include get, set, delete, update, create, compareAndSwap,
	// compareAndDelete and expire.
	Action string `json:"action"`

	// Node represents the state of the relevant etcd Node.
	Node *Node `json:"node"`

	// PrevNode represents the previous state of the Node. PrevNode is non-nil
	// only if the Node existed before the action occurred and the action
	// caused a change to the Node.
	PrevNode *Node `json:"prevNode"`

	// Index holds the cluster-level index at the time the Response was generated.
	// This index is not tied to the Node(s) contained in this Response.
	Index uint64 `json:"-"`

	// ClusterID holds the cluster-level ID reported by the server. This
	// should be different for different etcd clusters.
	ClusterID string `json:"-"`
}
|
||||||
|
|
||||||
|
// Node is a single entry (key or directory) in the etcd v2 key space.
type Node struct {
	// Key represents the unique location of this Node (e.g. "/foo/bar").
	Key string `json:"key"`

	// Dir reports whether node describes a directory.
	Dir bool `json:"dir,omitempty"`

	// Value is the current data stored on this Node. If this Node
	// is a directory, Value will be empty.
	Value string `json:"value"`

	// Nodes holds the children of this Node, only if this Node is a directory.
	// This slice of will be arbitrarily deep (children, grandchildren, great-
	// grandchildren, etc.) if a recursive Get or Watch request were made.
	Nodes Nodes `json:"nodes"`

	// CreatedIndex is the etcd index at-which this Node was created.
	CreatedIndex uint64 `json:"createdIndex"`

	// ModifiedIndex is the etcd index at-which this Node was last modified.
	ModifiedIndex uint64 `json:"modifiedIndex"`

	// Expiration is the server side expiration time of the key.
	Expiration *time.Time `json:"expiration,omitempty"`

	// TTL is the time to live of the key in second.
	TTL int64 `json:"ttl,omitempty"`
}
|
||||||
|
|
||||||
|
// String returns a compact debug representation of the Node; Value and
// children are deliberately omitted.
func (n *Node) String() string {
	return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
}

// TTLDuration returns the Node's TTL as a time.Duration object
func (n *Node) TTLDuration() time.Duration {
	return time.Duration(n.TTL) * time.Second
}

// Nodes is a sortable (by Key, ascending) collection of Node pointers.
type Nodes []*Node

// interfaces for sorting

func (ns Nodes) Len() int           { return len(ns) }
func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
func (ns Nodes) Swap(i, j int)      { ns[i], ns[j] = ns[j], ns[i] }
|
||||||
|
|
||||||
|
// httpKeysAPI is the HTTP-backed implementation of KeysAPI. All requests
// are issued under the configured URL prefix (normally /v2/keys).
type httpKeysAPI struct {
	client httpClient
	prefix string
}
|
||||||
|
|
||||||
|
// Set writes val to key, applying any preconditions and flags from opts
// (which may be nil). The decoded server response is returned on success.
func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
	act := &setAction{
		Prefix: k.prefix,
		Key:    key,
		Value:  val,
	}

	if opts != nil {
		act.PrevValue = opts.PrevValue
		act.PrevIndex = opts.PrevIndex
		act.PrevExist = opts.PrevExist
		act.TTL = opts.TTL
		act.Refresh = opts.Refresh
		act.Dir = opts.Dir
		act.NoValueOnSuccess = opts.NoValueOnSuccess
	}

	doCtx := ctx
	if act.PrevExist == PrevNoExist {
		// Mark create-only requests as one-shot — presumably so the cluster
		// client does not retry them on another endpoint (a retry could
		// observe its own earlier create); confirm against the cluster
		// client's handling of oneShotCtxValue.
		doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
	}
	resp, body, err := k.client.Do(doCtx, act)
	if err != nil {
		return nil, err
	}

	return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
|
||||||
|
|
||||||
|
// Create sets key to val only if the key does not already exist
// (Set with PrevExist=false).
func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
	return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
}
|
||||||
|
|
||||||
|
func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
|
||||||
|
act := &createInOrderAction{
|
||||||
|
Prefix: k.prefix,
|
||||||
|
Dir: dir,
|
||||||
|
Value: val,
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts != nil {
|
||||||
|
act.TTL = opts.TTL
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, body, err := k.client.Do(ctx, act)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
|
||||||
|
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
|
||||||
|
act := &deleteAction{
|
||||||
|
Prefix: k.prefix,
|
||||||
|
Key: key,
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts != nil {
|
||||||
|
act.PrevValue = opts.PrevValue
|
||||||
|
act.PrevIndex = opts.PrevIndex
|
||||||
|
act.Dir = opts.Dir
|
||||||
|
act.Recursive = opts.Recursive
|
||||||
|
}
|
||||||
|
|
||||||
|
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
|
||||||
|
resp, body, err := k.client.Do(doCtx, act)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
|
||||||
|
act := &getAction{
|
||||||
|
Prefix: k.prefix,
|
||||||
|
Key: key,
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts != nil {
|
||||||
|
act.Recursive = opts.Recursive
|
||||||
|
act.Sorted = opts.Sort
|
||||||
|
act.Quorum = opts.Quorum
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, body, err := k.client.Do(ctx, act)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
|
||||||
|
act := waitAction{
|
||||||
|
Prefix: k.prefix,
|
||||||
|
Key: key,
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts != nil {
|
||||||
|
act.Recursive = opts.Recursive
|
||||||
|
if opts.AfterIndex > 0 {
|
||||||
|
act.WaitIndex = opts.AfterIndex + 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &httpWatcher{
|
||||||
|
client: k.client,
|
||||||
|
nextWait: act,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpWatcher struct {
|
||||||
|
client httpClient
|
||||||
|
nextWait waitAction
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
|
||||||
|
for {
|
||||||
|
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
|
||||||
|
if err != nil {
|
||||||
|
if err == ErrEmptyBody {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// v2KeysURL forms a URL representing the location of a key.
|
||||||
|
// The endpoint argument represents the base URL of an etcd
|
||||||
|
// server. The prefix is the path needed to route from the
|
||||||
|
// provided endpoint's path to the root of the keys API
|
||||||
|
// (typically "/v2/keys").
|
||||||
|
func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
|
||||||
|
// We concatenate all parts together manually. We cannot use
|
||||||
|
// path.Join because it does not reserve trailing slash.
|
||||||
|
// We call CanonicalURLPath to further cleanup the path.
|
||||||
|
if prefix != "" && prefix[0] != '/' {
|
||||||
|
prefix = "/" + prefix
|
||||||
|
}
|
||||||
|
if key != "" && key[0] != '/' {
|
||||||
|
key = "/" + key
|
||||||
|
}
|
||||||
|
ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
|
||||||
|
return &ep
|
||||||
|
}
|
||||||
|
|
||||||
|
type getAction struct {
|
||||||
|
Prefix string
|
||||||
|
Key string
|
||||||
|
Recursive bool
|
||||||
|
Sorted bool
|
||||||
|
Quorum bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2KeysURL(ep, g.Prefix, g.Key)
|
||||||
|
|
||||||
|
params := u.Query()
|
||||||
|
params.Set("recursive", strconv.FormatBool(g.Recursive))
|
||||||
|
params.Set("sorted", strconv.FormatBool(g.Sorted))
|
||||||
|
params.Set("quorum", strconv.FormatBool(g.Quorum))
|
||||||
|
u.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
type waitAction struct {
|
||||||
|
Prefix string
|
||||||
|
Key string
|
||||||
|
WaitIndex uint64
|
||||||
|
Recursive bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2KeysURL(ep, w.Prefix, w.Key)
|
||||||
|
|
||||||
|
params := u.Query()
|
||||||
|
params.Set("wait", "true")
|
||||||
|
params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
|
||||||
|
params.Set("recursive", strconv.FormatBool(w.Recursive))
|
||||||
|
u.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
type setAction struct {
|
||||||
|
Prefix string
|
||||||
|
Key string
|
||||||
|
Value string
|
||||||
|
PrevValue string
|
||||||
|
PrevIndex uint64
|
||||||
|
PrevExist PrevExistType
|
||||||
|
TTL time.Duration
|
||||||
|
Refresh bool
|
||||||
|
Dir bool
|
||||||
|
NoValueOnSuccess bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2KeysURL(ep, a.Prefix, a.Key)
|
||||||
|
|
||||||
|
params := u.Query()
|
||||||
|
form := url.Values{}
|
||||||
|
|
||||||
|
// we're either creating a directory or setting a key
|
||||||
|
if a.Dir {
|
||||||
|
params.Set("dir", strconv.FormatBool(a.Dir))
|
||||||
|
} else {
|
||||||
|
// These options are only valid for setting a key
|
||||||
|
if a.PrevValue != "" {
|
||||||
|
params.Set("prevValue", a.PrevValue)
|
||||||
|
}
|
||||||
|
form.Add("value", a.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Options which apply to both setting a key and creating a dir
|
||||||
|
if a.PrevIndex != 0 {
|
||||||
|
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
|
||||||
|
}
|
||||||
|
if a.PrevExist != PrevIgnore {
|
||||||
|
params.Set("prevExist", string(a.PrevExist))
|
||||||
|
}
|
||||||
|
if a.TTL > 0 {
|
||||||
|
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Refresh {
|
||||||
|
form.Add("refresh", "true")
|
||||||
|
}
|
||||||
|
if a.NoValueOnSuccess {
|
||||||
|
params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
|
||||||
|
}
|
||||||
|
|
||||||
|
u.RawQuery = params.Encode()
|
||||||
|
body := strings.NewReader(form.Encode())
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("PUT", u.String(), body)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
type deleteAction struct {
|
||||||
|
Prefix string
|
||||||
|
Key string
|
||||||
|
PrevValue string
|
||||||
|
PrevIndex uint64
|
||||||
|
Dir bool
|
||||||
|
Recursive bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2KeysURL(ep, a.Prefix, a.Key)
|
||||||
|
|
||||||
|
params := u.Query()
|
||||||
|
if a.PrevValue != "" {
|
||||||
|
params.Set("prevValue", a.PrevValue)
|
||||||
|
}
|
||||||
|
if a.PrevIndex != 0 {
|
||||||
|
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
|
||||||
|
}
|
||||||
|
if a.Dir {
|
||||||
|
params.Set("dir", "true")
|
||||||
|
}
|
||||||
|
if a.Recursive {
|
||||||
|
params.Set("recursive", "true")
|
||||||
|
}
|
||||||
|
u.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("DELETE", u.String(), nil)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
type createInOrderAction struct {
|
||||||
|
Prefix string
|
||||||
|
Dir string
|
||||||
|
Value string
|
||||||
|
TTL time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2KeysURL(ep, a.Prefix, a.Dir)
|
||||||
|
|
||||||
|
form := url.Values{}
|
||||||
|
form.Add("value", a.Value)
|
||||||
|
if a.TTL > 0 {
|
||||||
|
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
|
||||||
|
}
|
||||||
|
body := strings.NewReader(form.Encode())
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("POST", u.String(), body)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
|
||||||
|
switch code {
|
||||||
|
case http.StatusOK, http.StatusCreated:
|
||||||
|
if len(body) == 0 {
|
||||||
|
return nil, ErrEmptyBody
|
||||||
|
}
|
||||||
|
res, err = unmarshalSuccessfulKeysResponse(header, body)
|
||||||
|
default:
|
||||||
|
err = unmarshalFailedKeysResponse(body)
|
||||||
|
}
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
|
||||||
|
var res Response
|
||||||
|
err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res)
|
||||||
|
if err != nil {
|
||||||
|
return nil, ErrInvalidJSON
|
||||||
|
}
|
||||||
|
if header.Get("X-Etcd-Index") != "" {
|
||||||
|
res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res.ClusterID = header.Get("X-Etcd-Cluster-ID")
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalFailedKeysResponse(body []byte) error {
|
||||||
|
var etcdErr Error
|
||||||
|
if err := json.Unmarshal(body, &etcdErr); err != nil {
|
||||||
|
return ErrInvalidJSON
|
||||||
|
}
|
||||||
|
return etcdErr
|
||||||
|
}
|
303
vendor/github.com/coreos/etcd/client/members.go
generated
vendored
Normal file
303
vendor/github.com/coreos/etcd/client/members.go
generated
vendored
Normal file
@ -0,0 +1,303 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/pkg/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultV2MembersPrefix = "/v2/members"
|
||||||
|
defaultLeaderSuffix = "/leader"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Member struct {
|
||||||
|
// ID is the unique identifier of this Member.
|
||||||
|
ID string `json:"id"`
|
||||||
|
|
||||||
|
// Name is a human-readable, non-unique identifier of this Member.
|
||||||
|
Name string `json:"name"`
|
||||||
|
|
||||||
|
// PeerURLs represents the HTTP(S) endpoints this Member uses to
|
||||||
|
// participate in etcd's consensus protocol.
|
||||||
|
PeerURLs []string `json:"peerURLs"`
|
||||||
|
|
||||||
|
// ClientURLs represents the HTTP(S) endpoints on which this Member
|
||||||
|
// serves its client-facing APIs.
|
||||||
|
ClientURLs []string `json:"clientURLs"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type memberCollection []Member
|
||||||
|
|
||||||
|
func (c *memberCollection) UnmarshalJSON(data []byte) error {
|
||||||
|
d := struct {
|
||||||
|
Members []Member
|
||||||
|
}{}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(data, &d); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if d.Members == nil {
|
||||||
|
*c = make([]Member, 0)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
*c = d.Members
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type memberCreateOrUpdateRequest struct {
|
||||||
|
PeerURLs types.URLs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
|
||||||
|
s := struct {
|
||||||
|
PeerURLs []string `json:"peerURLs"`
|
||||||
|
}{
|
||||||
|
PeerURLs: make([]string, len(m.PeerURLs)),
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, u := range m.PeerURLs {
|
||||||
|
s.PeerURLs[i] = u.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Marshal(&s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMembersAPI constructs a new MembersAPI that uses HTTP to
|
||||||
|
// interact with etcd's membership API.
|
||||||
|
func NewMembersAPI(c Client) MembersAPI {
|
||||||
|
return &httpMembersAPI{
|
||||||
|
client: c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type MembersAPI interface {
|
||||||
|
// List enumerates the current cluster membership.
|
||||||
|
List(ctx context.Context) ([]Member, error)
|
||||||
|
|
||||||
|
// Add instructs etcd to accept a new Member into the cluster.
|
||||||
|
Add(ctx context.Context, peerURL string) (*Member, error)
|
||||||
|
|
||||||
|
// Remove demotes an existing Member out of the cluster.
|
||||||
|
Remove(ctx context.Context, mID string) error
|
||||||
|
|
||||||
|
// Update instructs etcd to update an existing Member in the cluster.
|
||||||
|
Update(ctx context.Context, mID string, peerURLs []string) error
|
||||||
|
|
||||||
|
// Leader gets current leader of the cluster
|
||||||
|
Leader(ctx context.Context) (*Member, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpMembersAPI struct {
|
||||||
|
client httpClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
|
||||||
|
req := &membersAPIActionList{}
|
||||||
|
resp, body, err := m.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var mCollection memberCollection
|
||||||
|
if err := json.Unmarshal(body, &mCollection); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return []Member(mCollection), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
|
||||||
|
urls, err := types.NewURLs([]string{peerURL})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req := &membersAPIActionAdd{peerURLs: urls}
|
||||||
|
resp, body, err := m.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusCreated {
|
||||||
|
var merr membersError
|
||||||
|
if err := json.Unmarshal(body, &merr); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return nil, merr
|
||||||
|
}
|
||||||
|
|
||||||
|
var memb Member
|
||||||
|
if err := json.Unmarshal(body, &memb); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &memb, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
|
||||||
|
urls, err := types.NewURLs(peerURLs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
|
||||||
|
resp, body, err := m.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusNoContent {
|
||||||
|
var merr membersError
|
||||||
|
if err := json.Unmarshal(body, &merr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return merr
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
|
||||||
|
req := &membersAPIActionRemove{memberID: memberID}
|
||||||
|
resp, _, err := m.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
|
||||||
|
req := &membersAPIActionLeader{}
|
||||||
|
resp, body, err := m.client.Do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var leader Member
|
||||||
|
if err := json.Unmarshal(body, &leader); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &leader, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type membersAPIActionList struct{}
|
||||||
|
|
||||||
|
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2MembersURL(ep)
|
||||||
|
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
type membersAPIActionRemove struct {
|
||||||
|
memberID string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2MembersURL(ep)
|
||||||
|
u.Path = path.Join(u.Path, d.memberID)
|
||||||
|
req, _ := http.NewRequest("DELETE", u.String(), nil)
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
type membersAPIActionAdd struct {
|
||||||
|
peerURLs types.URLs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2MembersURL(ep)
|
||||||
|
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
|
||||||
|
b, _ := json.Marshal(&m)
|
||||||
|
req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
type membersAPIActionUpdate struct {
|
||||||
|
memberID string
|
||||||
|
peerURLs types.URLs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2MembersURL(ep)
|
||||||
|
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
|
||||||
|
u.Path = path.Join(u.Path, a.memberID)
|
||||||
|
b, _ := json.Marshal(&m)
|
||||||
|
req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertStatusCode(got int, want ...int) (err error) {
|
||||||
|
for _, w := range want {
|
||||||
|
if w == got {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected status code %d", got)
|
||||||
|
}
|
||||||
|
|
||||||
|
type membersAPIActionLeader struct{}
|
||||||
|
|
||||||
|
func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
|
||||||
|
u := v2MembersURL(ep)
|
||||||
|
u.Path = path.Join(u.Path, defaultLeaderSuffix)
|
||||||
|
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
// v2MembersURL add the necessary path to the provided endpoint
|
||||||
|
// to route requests to the default v2 members API.
|
||||||
|
func v2MembersURL(ep url.URL) *url.URL {
|
||||||
|
ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
|
||||||
|
return &ep
|
||||||
|
}
|
||||||
|
|
||||||
|
type membersError struct {
|
||||||
|
Message string `json:"message"`
|
||||||
|
Code int `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e membersError) Error() string {
|
||||||
|
return e.Message
|
||||||
|
}
|
53
vendor/github.com/coreos/etcd/client/util.go
generated
vendored
Normal file
53
vendor/github.com/coreos/etcd/client/util.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
roleNotFoundRegExp *regexp.Regexp
|
||||||
|
userNotFoundRegExp *regexp.Regexp
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
|
||||||
|
userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
|
||||||
|
func IsKeyNotFound(err error) bool {
|
||||||
|
if cErr, ok := err.(Error); ok {
|
||||||
|
return cErr.Code == ErrorCodeKeyNotFound
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsRoleNotFound returns true if the error means role not found of v2 API.
|
||||||
|
func IsRoleNotFound(err error) bool {
|
||||||
|
if ae, ok := err.(authError); ok {
|
||||||
|
return roleNotFoundRegExp.MatchString(ae.Message)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsUserNotFound returns true if the error means user not found of v2 API.
|
||||||
|
func IsUserNotFound(err error) bool {
|
||||||
|
if ae, ok := err.(authError); ok {
|
||||||
|
return userNotFoundRegExp.MatchString(ae.Message)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
202
vendor/github.com/coreos/etcd/pkg/pathutil/LICENSE
generated
vendored
Normal file
202
vendor/github.com/coreos/etcd/pkg/pathutil/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
31
vendor/github.com/coreos/etcd/pkg/pathutil/path.go
generated
vendored
Normal file
31
vendor/github.com/coreos/etcd/pkg/pathutil/path.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package pathutil implements utility functions for handling slash-separated
|
||||||
|
// paths.
|
||||||
|
package pathutil
|
||||||
|
|
||||||
|
import "path"
|
||||||
|
|
||||||
|
// CanonicalURLPath returns the canonical url path for p, which follows the rules:
|
||||||
|
// 1. the path always starts with "/"
|
||||||
|
// 2. replace multiple slashes with a single slash
|
||||||
|
// 3. replace each '.' '..' path name element with equivalent one
|
||||||
|
// 4. keep the trailing slash
|
||||||
|
// The function is borrowed from stdlib http.cleanPath in server.go.
|
||||||
|
// CanonicalURLPath returns the canonical url path for p, which follows the rules:
// 1. the path always starts with "/"
// 2. replace multiple slashes with a single slash
// 3. replace each '.' '..' path name element with equivalent one
// 4. keep the trailing slash
// The function is borrowed from stdlib http.cleanPath in server.go.
func CanonicalURLPath(p string) string {
	if len(p) == 0 {
		return "/"
	}
	// Ensure a leading slash so path.Clean treats p as rooted.
	if p[0] != '/' {
		p = "/" + p
	}
	cleaned := path.Clean(p)
	// path.Clean strips a trailing slash (except for the root path);
	// restore it when the input ended with one.
	if cleaned != "/" && p[len(p)-1] == '/' {
		cleaned += "/"
	}
	return cleaned
}
|
202
vendor/github.com/coreos/etcd/pkg/srv/LICENSE
generated
vendored
Normal file
202
vendor/github.com/coreos/etcd/pkg/srv/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
130
vendor/github.com/coreos/etcd/pkg/srv/srv.go
generated
vendored
Normal file
130
vendor/github.com/coreos/etcd/pkg/srv/srv.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package srv looks up DNS SRV records.
|
||||||
|
package srv
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/pkg/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// indirection for testing: tests replace these with fakes so no real
	// DNS queries or address resolution happen.
	lookupSRV      = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
	resolveTCPAddr = net.ResolveTCPAddr
)
|
||||||
|
|
||||||
|
// GetCluster gets the cluster information via DNS discovery.
// Also sees each entry as a separate instance.
//
// serviceScheme is the URL scheme expected for peers ("http"/"https"),
// service is the SRV service name to query (without the leading "_"),
// name is the local member name, dns is the discovery domain, and apurls
// are this member's advertise-peer URLs. The result is a list of
// "name=scheme://host:port" strings suitable for an initial-cluster value.
func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]string, error) {
	// tempName numbers discovered peers whose address did not match one of
	// our own advertise URLs (i.e. remote members with unknown names).
	tempName := int(0)
	// tcp2ap maps a resolved TCP address back to the advertise URL it came
	// from, so SRV answers can be recognized as "self".
	tcp2ap := make(map[string]url.URL)

	// First, resolve the apurls
	for _, url := range apurls {
		tcpAddr, err := resolveTCPAddr("tcp", url.Host)
		if err != nil {
			return nil, err
		}
		tcp2ap[tcpAddr.String()] = url
	}

	stringParts := []string{}
	updateNodeMap := func(service, scheme string) error {
		_, addrs, err := lookupSRV(service, "tcp", dns)
		if err != nil {
			return err
		}
		for _, srv := range addrs {
			port := fmt.Sprintf("%d", srv.Port)
			host := net.JoinHostPort(srv.Target, port)
			tcpAddr, terr := resolveTCPAddr("tcp", host)
			if terr != nil {
				// Record the error but keep scanning the remaining
				// SRV records; it is only surfaced if nothing usable
				// is found at all.
				err = terr
				continue
			}
			n := ""
			url, ok := tcp2ap[tcpAddr.String()]
			if ok {
				// This record resolves to one of our own advertise
				// URLs, so label it with our member name.
				n = name
			}
			if n == "" {
				n = fmt.Sprintf("%d", tempName)
				tempName++
			}
			// SRV records have a trailing dot but URL shouldn't.
			shortHost := strings.TrimSuffix(srv.Target, ".")
			urlHost := net.JoinHostPort(shortHost, port)
			if ok && url.Scheme != scheme {
				// Self entry found via DNS but with the wrong scheme:
				// remember the mismatch as an error instead of adding it.
				err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
			} else {
				stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
			}
		}
		if len(stringParts) == 0 {
			// Nothing usable was collected; return the last recorded
			// per-record error (NOTE(review): may be nil if the SRV
			// answer was empty — caller then gets an empty, nil-error
			// result; confirm this is intended upstream behavior).
			return err
		}
		return nil
	}

	err := updateNodeMap(service, serviceScheme)
	if err != nil {
		return nil, fmt.Errorf("error querying DNS SRV records for _%s %s", service, err)
	}
	return stringParts, nil
}
|
||||||
|
|
||||||
|
// SRVClients holds the result of a client-endpoint SRV lookup: the
// rendered endpoint URLs and the raw SRV records they were built from
// (parallel in discovery order, https records first).
type SRVClients struct {
	Endpoints []string
	SRVs      []*net.SRV
}
|
||||||
|
|
||||||
|
// GetClient looks up the client endpoints for a service and domain.
//
// It queries both the "<service>-ssl" SRV name (rendered as https URLs)
// and the plain "<service>" name (rendered as http URLs), combining the
// results. An error is returned only when both lookups fail.
func GetClient(service, domain string) (*SRVClients, error) {
	var urls []*url.URL
	var srvs []*net.SRV

	updateURLs := func(service, scheme string) error {
		_, addrs, err := lookupSRV(service, "tcp", domain)
		if err != nil {
			return err
		}
		for _, srv := range addrs {
			// Build one URL per SRV record; note the target may keep
			// its trailing DNS dot here (unlike GetCluster's peers).
			urls = append(urls, &url.URL{
				Scheme: scheme,
				Host:   net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
			})
		}
		srvs = append(srvs, addrs...)
		return nil
	}

	errHTTPS := updateURLs(service+"-ssl", "https")
	errHTTP := updateURLs(service, "http")

	// A single failed lookup is tolerated; only fail when neither the
	// ssl nor the plain service name produced records.
	if errHTTPS != nil && errHTTP != nil {
		return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
	}

	endpoints := make([]string, len(urls))
	for i := range urls {
		endpoints[i] = urls[i].String()
	}
	return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
}
|
202
vendor/github.com/coreos/etcd/pkg/types/LICENSE
generated
vendored
Normal file
202
vendor/github.com/coreos/etcd/pkg/types/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
17
vendor/github.com/coreos/etcd/pkg/types/doc.go
generated
vendored
Normal file
17
vendor/github.com/coreos/etcd/pkg/types/doc.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package types declares various data types and implements type-checking
|
||||||
|
// functions.
|
||||||
|
package types
|
41
vendor/github.com/coreos/etcd/pkg/types/id.go
generated
vendored
Normal file
41
vendor/github.com/coreos/etcd/pkg/types/id.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID represents a generic identifier which is canonically
// stored as a uint64 but is typically represented as a
// base-16 string for input/output
type ID uint64

// String renders the ID as a lowercase base-16 string.
func (i ID) String() string {
	return strconv.FormatUint(uint64(i), 16)
}

// IDFromString attempts to create an ID from a base-16 string.
func IDFromString(s string) (ID, error) {
	v, err := strconv.ParseUint(s, 16, 64)
	return ID(v), err
}

// IDSlice implements the sort interface
type IDSlice []ID

func (s IDSlice) Len() int           { return len(s) }
func (s IDSlice) Less(i, j int) bool { return uint64(s[i]) < uint64(s[j]) }
func (s IDSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
|
178
vendor/github.com/coreos/etcd/pkg/types/set.go
generated
vendored
Normal file
178
vendor/github.com/coreos/etcd/pkg/types/set.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Set is an unordered collection of unique strings supporting
// membership tests, equality comparison, copying, and set difference.
type Set interface {
	Add(string)
	Remove(string)
	Contains(string) bool
	Equals(Set) bool
	Length() int
	Values() []string
	Copy() Set
	Sub(Set) Set
}

// NewUnsafeSet returns a Set that is NOT safe for concurrent use,
// pre-populated with the given values.
func NewUnsafeSet(values ...string) *unsafeSet {
	s := &unsafeSet{d: make(map[string]struct{})}
	for _, v := range values {
		s.Add(v)
	}
	return s
}

// NewThreadsafeSet returns a mutex-guarded Set pre-populated with the
// given values.
func NewThreadsafeSet(values ...string) *tsafeSet {
	return &tsafeSet{us: NewUnsafeSet(values...)}
}

// unsafeSet is a map-backed Set implementation with no locking.
type unsafeSet struct {
	d map[string]struct{}
}

// Add adds a new value to the set (no-op if the value is already present)
func (us *unsafeSet) Add(value string) {
	us.d[value] = struct{}{}
}

// Remove removes the given value from the set
func (us *unsafeSet) Remove(value string) {
	delete(us.d, value)
}

// Contains returns whether the set contains the given value
func (us *unsafeSet) Contains(value string) (exists bool) {
	_, exists = us.d[value]
	return exists
}

// ContainsAll returns whether the set contains all given values
func (us *unsafeSet) ContainsAll(values []string) bool {
	for _, v := range values {
		if !us.Contains(v) {
			return false
		}
	}
	return true
}

// Equals returns whether the contents of two sets are identical
func (us *unsafeSet) Equals(other Set) bool {
	a := sort.StringSlice(us.Values())
	b := sort.StringSlice(other.Values())
	a.Sort()
	b.Sort()
	return reflect.DeepEqual(a, b)
}

// Length returns the number of elements in the set
func (us *unsafeSet) Length() int {
	return len(us.d)
}

// Values returns the values of the Set in an unspecified order.
func (us *unsafeSet) Values() []string {
	// Always return a non-nil (possibly empty) slice so that Equals
	// can compare results with reflect.DeepEqual.
	vals := make([]string, 0, len(us.d))
	for v := range us.d {
		vals = append(vals, v)
	}
	return vals
}

// Copy creates a new Set containing the values of the first
func (us *unsafeSet) Copy() Set {
	cp := NewUnsafeSet()
	for v := range us.d {
		cp.Add(v)
	}
	return cp
}

// Sub returns a new Set holding the elements of us that are not in other.
func (us *unsafeSet) Sub(other Set) Set {
	// delete is a no-op for absent keys, so no existence check is needed.
	result := us.Copy().(*unsafeSet)
	for _, v := range other.Values() {
		delete(result.d, v)
	}
	return result
}

// tsafeSet wraps an unsafeSet with an RWMutex for concurrent use.
type tsafeSet struct {
	us *unsafeSet
	m  sync.RWMutex
}

func (ts *tsafeSet) Add(value string) {
	ts.m.Lock()
	defer ts.m.Unlock()
	ts.us.Add(value)
}

func (ts *tsafeSet) Remove(value string) {
	ts.m.Lock()
	defer ts.m.Unlock()
	ts.us.Remove(value)
}

func (ts *tsafeSet) Contains(value string) (exists bool) {
	ts.m.RLock()
	defer ts.m.RUnlock()
	return ts.us.Contains(value)
}

func (ts *tsafeSet) Equals(other Set) bool {
	ts.m.RLock()
	defer ts.m.RUnlock()
	return ts.us.Equals(other)
}

func (ts *tsafeSet) Length() int {
	ts.m.RLock()
	defer ts.m.RUnlock()
	return ts.us.Length()
}

func (ts *tsafeSet) Values() (values []string) {
	ts.m.RLock()
	defer ts.m.RUnlock()
	return ts.us.Values()
}

func (ts *tsafeSet) Copy() Set {
	ts.m.RLock()
	defer ts.m.RUnlock()
	return &tsafeSet{us: ts.us.Copy().(*unsafeSet)}
}

func (ts *tsafeSet) Sub(other Set) Set {
	ts.m.RLock()
	defer ts.m.RUnlock()
	return &tsafeSet{us: ts.us.Sub(other).(*unsafeSet)}
}
|
22
vendor/github.com/coreos/etcd/pkg/types/slice.go
generated
vendored
Normal file
22
vendor/github.com/coreos/etcd/pkg/types/slice.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
// Uint64Slice implements sort interface
type Uint64Slice []uint64

func (s Uint64Slice) Len() int           { return len(s) }
func (s Uint64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Uint64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
|
82
vendor/github.com/coreos/etcd/pkg/types/urls.go
generated
vendored
Normal file
82
vendor/github.com/coreos/etcd/pkg/types/urls.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// URLs is a sortable collection of parsed peer/client URLs.
type URLs []url.URL

// NewURLs parses strs into a sorted URLs slice. Every entry must be a
// valid http, https, unix, or unixs URL of the form scheme://host:port
// with no path component; surrounding whitespace is trimmed.
func NewURLs(strs []string) (URLs, error) {
	// Validate emptiness before allocating the result slice.
	if len(strs) == 0 {
		return nil, errors.New("no valid URLs given")
	}
	all := make([]url.URL, len(strs))
	for i, in := range strs {
		in = strings.TrimSpace(in)
		u, err := url.Parse(in)
		if err != nil {
			return nil, err
		}
		if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
			return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
		}
		if _, _, err := net.SplitHostPort(u.Host); err != nil {
			return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
		}
		if u.Path != "" {
			return nil, fmt.Errorf("URL must not contain a path: %s", in)
		}
		all[i] = *u
	}
	us := URLs(all)
	us.Sort()

	return us, nil
}

// MustNewURLs is like NewURLs but panics on any parse/validation error.
func MustNewURLs(strs []string) URLs {
	urls, err := NewURLs(strs)
	if err != nil {
		panic(err)
	}
	return urls
}

// String returns the URLs joined by commas, in sorted order.
func (us URLs) String() string {
	return strings.Join(us.StringSlice(), ",")
}

// Sort orders the URLs lexicographically by their string form.
func (us *URLs) Sort() {
	sort.Sort(us)
}
func (us URLs) Len() int           { return len(us) }
func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
func (us URLs) Swap(i, j int)      { us[i], us[j] = us[j], us[i] }

// StringSlice returns the string form of each URL, preserving order.
func (us URLs) StringSlice() []string {
	out := make([]string, len(us))
	for i := range us {
		out[i] = us[i].String()
	}

	return out
}
|
107
vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
generated
vendored
Normal file
107
vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
generated
vendored
Normal file
@ -0,0 +1,107 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// URLsMap is a map from a name to its URLs.
|
||||||
|
type URLsMap map[string]URLs
|
||||||
|
|
||||||
|
// NewURLsMap returns a URLsMap instantiated from the given string,
|
||||||
|
// which consists of discovery-formatted names-to-URLs, like:
|
||||||
|
// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
|
||||||
|
func NewURLsMap(s string) (URLsMap, error) {
|
||||||
|
m := parse(s)
|
||||||
|
|
||||||
|
cl := URLsMap{}
|
||||||
|
for name, urls := range m {
|
||||||
|
us, err := NewURLs(urls)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cl[name] = us
|
||||||
|
}
|
||||||
|
return cl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The
|
||||||
|
// string values in the map can be multiple values separated by the sep string.
|
||||||
|
func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
|
||||||
|
var err error
|
||||||
|
um := URLsMap{}
|
||||||
|
for k, v := range m {
|
||||||
|
um[k], err = NewURLs(strings.Split(v, sep))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return um, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
|
||||||
|
func (c URLsMap) String() string {
|
||||||
|
var pairs []string
|
||||||
|
for name, urls := range c {
|
||||||
|
for _, url := range urls {
|
||||||
|
pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Strings(pairs)
|
||||||
|
return strings.Join(pairs, ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
// URLs returns a list of all URLs.
|
||||||
|
// The returned list is sorted in ascending lexicographical order.
|
||||||
|
func (c URLsMap) URLs() []string {
|
||||||
|
var urls []string
|
||||||
|
for _, us := range c {
|
||||||
|
for _, u := range us {
|
||||||
|
urls = append(urls, u.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Strings(urls)
|
||||||
|
return urls
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the size of URLsMap.
|
||||||
|
func (c URLsMap) Len() int {
|
||||||
|
return len(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse parses the given string and returns a map listing the values specified for each key.
// Input is a comma-separated list of key=value pairs; empty segments are
// skipped, and a segment without "=" yields an empty-string value.
func parse(s string) map[string][]string {
	m := make(map[string][]string)
	for _, pair := range strings.Split(s, ",") {
		if pair == "" {
			continue
		}
		key, value := pair, ""
		if i := strings.Index(pair, "="); i >= 0 {
			key, value = pair[:i], pair[i+1:]
		}
		m[key] = append(m[key], value)
	}
	return m
}
|
202
vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/LICENSE
generated
vendored
Normal file
202
vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
268
vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/semver.go
generated
vendored
Normal file
268
vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/semver.go
generated
vendored
Normal file
@ -0,0 +1,268 @@
|
|||||||
|
// Copyright 2013-2015 CoreOS, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Semantic Versions http://semver.org
|
||||||
|
package semver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Version struct {
|
||||||
|
Major int64
|
||||||
|
Minor int64
|
||||||
|
Patch int64
|
||||||
|
PreRelease PreRelease
|
||||||
|
Metadata string
|
||||||
|
}
|
||||||
|
|
||||||
|
type PreRelease string
|
||||||
|
|
||||||
|
func splitOff(input *string, delim string) (val string) {
|
||||||
|
parts := strings.SplitN(*input, delim, 2)
|
||||||
|
|
||||||
|
if len(parts) == 2 {
|
||||||
|
*input = parts[0]
|
||||||
|
val = parts[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(version string) *Version {
|
||||||
|
return Must(NewVersion(version))
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewVersion(version string) (*Version, error) {
|
||||||
|
v := Version{}
|
||||||
|
|
||||||
|
if err := v.Set(version); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must is a helper for wrapping NewVersion and will panic if err is not nil.
|
||||||
|
func Must(v *Version, err error) *Version {
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set parses and updates v from the given version string. Implements flag.Value
|
||||||
|
func (v *Version) Set(version string) error {
|
||||||
|
metadata := splitOff(&version, "+")
|
||||||
|
preRelease := PreRelease(splitOff(&version, "-"))
|
||||||
|
dotParts := strings.SplitN(version, ".", 3)
|
||||||
|
|
||||||
|
if len(dotParts) != 3 {
|
||||||
|
return fmt.Errorf("%s is not in dotted-tri format", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
parsed := make([]int64, 3, 3)
|
||||||
|
|
||||||
|
for i, v := range dotParts[:3] {
|
||||||
|
val, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
parsed[i] = val
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
v.Metadata = metadata
|
||||||
|
v.PreRelease = preRelease
|
||||||
|
v.Major = parsed[0]
|
||||||
|
v.Minor = parsed[1]
|
||||||
|
v.Patch = parsed[2]
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v Version) String() string {
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
|
||||||
|
fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
|
||||||
|
|
||||||
|
if v.PreRelease != "" {
|
||||||
|
fmt.Fprintf(&buffer, "-%s", v.PreRelease)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.Metadata != "" {
|
||||||
|
fmt.Fprintf(&buffer, "+%s", v.Metadata)
|
||||||
|
}
|
||||||
|
|
||||||
|
return buffer.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements yaml.Unmarshaler: the node is decoded as a
// plain string and then parsed via Set, so a malformed version string
// surfaces as an error.
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var data string
	if err := unmarshal(&data); err != nil {
		return err
	}
	return v.Set(data)
}
|
||||||
|
|
||||||
|
// MarshalJSON implements json.Marshaler, encoding v as a quoted semver string.
func (v Version) MarshalJSON() ([]byte, error) {
	return []byte(`"` + v.String() + `"`), nil
}
|
||||||
|
|
||||||
|
func (v *Version) UnmarshalJSON(data []byte) error {
|
||||||
|
l := len(data)
|
||||||
|
if l == 0 || string(data) == `""` {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if l < 2 || data[0] != '"' || data[l-1] != '"' {
|
||||||
|
return errors.New("invalid semver string")
|
||||||
|
}
|
||||||
|
return v.Set(string(data[1 : l-1]))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare tests if v is less than, equal to, or greater than versionB,
|
||||||
|
// returning -1, 0, or +1 respectively.
|
||||||
|
func (v Version) Compare(versionB Version) int {
|
||||||
|
if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
|
||||||
|
return cmp
|
||||||
|
}
|
||||||
|
return preReleaseCompare(v, versionB)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal tests if v is equal to versionB.
// Equality is by semver precedence (Compare), so build metadata is ignored.
func (v Version) Equal(versionB Version) bool {
	return v.Compare(versionB) == 0
}
|
||||||
|
|
||||||
|
// LessThan tests if v is less than versionB.
// Ordering is by semver precedence (Compare).
func (v Version) LessThan(versionB Version) bool {
	return v.Compare(versionB) < 0
}
|
||||||
|
|
||||||
|
// Slice converts the comparable parts of the semver into a slice of integers.
// Pre-release and metadata are excluded; they do not participate here.
func (v Version) Slice() []int64 {
	return []int64{v.Major, v.Minor, v.Patch}
}
|
||||||
|
|
||||||
|
func (p PreRelease) Slice() []string {
|
||||||
|
preRelease := string(p)
|
||||||
|
return strings.Split(preRelease, ".")
|
||||||
|
}
|
||||||
|
|
||||||
|
func preReleaseCompare(versionA Version, versionB Version) int {
|
||||||
|
a := versionA.PreRelease
|
||||||
|
b := versionB.PreRelease
|
||||||
|
|
||||||
|
/* Handle the case where if two versions are otherwise equal it is the
|
||||||
|
* one without a PreRelease that is greater */
|
||||||
|
if len(a) == 0 && (len(b) > 0) {
|
||||||
|
return 1
|
||||||
|
} else if len(b) == 0 && (len(a) > 0) {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// If there is a prerelease, check and compare each part.
|
||||||
|
return recursivePreReleaseCompare(a.Slice(), b.Slice())
|
||||||
|
}
|
||||||
|
|
||||||
|
// recursiveCompare orders two integer sequences lexicographically,
// returning -1, 0, or +1. The walk stops as soon as versionA is
// exhausted (callers pass equal-length Major/Minor/Patch triples).
func recursiveCompare(versionA []int64, versionB []int64) int {
	for idx, a := range versionA {
		b := versionB[idx]
		switch {
		case a > b:
			return 1
		case a < b:
			return -1
		}
	}
	return 0
}
|
||||||
|
|
||||||
|
// recursivePreReleaseCompare orders two dot-separated pre-release
// identifier lists, returning -1, 0, or +1. Identifiers that both parse
// as integers are compared numerically; otherwise (or as a tie-break)
// they are compared as plain strings. When all shared identifiers are
// equal, the longer list has the higher precedence.
func recursivePreReleaseCompare(versionA []string, versionB []string) int {
	// An exhausted list loses to a non-empty one: a larger set of fields
	// has higher precedence when all preceding identifiers are equal.
	if len(versionA) == 0 {
		if len(versionB) > 0 {
			return -1
		}
		return 0
	}
	if len(versionB) == 0 {
		return 1
	}

	a, b := versionA[0], versionB[0]

	aI, aErr := strconv.Atoi(a)
	bI, bErr := strconv.Atoi(b)

	// Numeric comparison applies only when both identifiers are integers.
	if aErr == nil && bErr == nil {
		if aI > bI {
			return 1
		}
		if aI < bI {
			return -1
		}
	}

	// String comparison for non-numeric identifiers (and as a fallthrough
	// when the numeric values tie, e.g. "01" vs "1").
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}

	return recursivePreReleaseCompare(versionA[1:], versionB[1:])
}
|
||||||
|
|
||||||
|
// BumpMajor increments the Major field by 1 and resets all other fields to their default values
|
||||||
|
func (v *Version) BumpMajor() {
|
||||||
|
v.Major += 1
|
||||||
|
v.Minor = 0
|
||||||
|
v.Patch = 0
|
||||||
|
v.PreRelease = PreRelease("")
|
||||||
|
v.Metadata = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
|
||||||
|
func (v *Version) BumpMinor() {
|
||||||
|
v.Minor += 1
|
||||||
|
v.Patch = 0
|
||||||
|
v.PreRelease = PreRelease("")
|
||||||
|
v.Metadata = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
|
||||||
|
func (v *Version) BumpPatch() {
|
||||||
|
v.Patch += 1
|
||||||
|
v.PreRelease = PreRelease("")
|
||||||
|
v.Metadata = ""
|
||||||
|
}
|
38
vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/sort.go
generated
vendored
Normal file
38
vendor/github.com/coreos/etcd/vendor/github.com/coreos/go-semver/semver/sort.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
// Copyright 2013-2015 CoreOS, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package semver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Versions implements sort.Interface over a slice of *Version, ordering
// by semver precedence (Version.LessThan).
type Versions []*Version

// Len returns the number of versions in the slice.
func (s Versions) Len() int {
	return len(s)
}

// Swap exchanges the versions at indexes i and j.
func (s Versions) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less reports whether the version at index i sorts before the one at j.
func (s Versions) Less(i, j int) bool {
	return s[i].LessThan(*s[j])
}

// Sort sorts the given slice of Version
func Sort(versions []*Version) {
	sort.Sort(Versions(versions))
}
|
185
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/0doc.go
generated
vendored
Normal file
185
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/0doc.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
/*
|
||||||
|
High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library for
|
||||||
|
binc, msgpack, cbor, json
|
||||||
|
|
||||||
|
Supported Serialization formats are:
|
||||||
|
|
||||||
|
- msgpack: https://github.com/msgpack/msgpack
|
||||||
|
- binc: http://github.com/ugorji/binc
|
||||||
|
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
|
||||||
|
- json: http://json.org http://tools.ietf.org/html/rfc7159
|
||||||
|
- simple:
|
||||||
|
|
||||||
|
To install:
|
||||||
|
|
||||||
|
go get github.com/ugorji/go/codec
|
||||||
|
|
||||||
|
This package will carefully use 'unsafe' for performance reasons in specific places.
|
||||||
|
You can build without unsafe use by passing the safe or appengine tag
|
||||||
|
i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3
|
||||||
|
go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from
|
||||||
|
go 1.7+ . This is because supporting unsafe requires knowledge of implementation details.
|
||||||
|
|
||||||
|
For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
|
||||||
|
|
||||||
|
The idiomatic Go support is as seen in other encoding packages in
|
||||||
|
the standard library (ie json, xml, gob, etc).
|
||||||
|
|
||||||
|
Rich Feature Set includes:
|
||||||
|
|
||||||
|
- Simple but extremely powerful and feature-rich API
|
||||||
|
- Support for go1.4 and above, while selectively using newer APIs for later releases
|
||||||
|
- Good code coverage ( > 70% )
|
||||||
|
- Very High Performance.
|
||||||
|
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
|
||||||
|
- Careful selected use of 'unsafe' for targeted performance gains.
|
||||||
|
100% mode exists where 'unsafe' is not used at all.
|
||||||
|
- Lock-free (sans mutex) concurrency for scaling to 100's of cores
|
||||||
|
- Multiple conversions:
|
||||||
|
Package coerces types where appropriate
|
||||||
|
e.g. decode an int in the stream into a float, etc.
|
||||||
|
- Corner Cases:
|
||||||
|
Overflows, nil maps/slices, nil values in streams are handled correctly
|
||||||
|
- Standard field renaming via tags
|
||||||
|
- Support for omitting empty fields during an encoding
|
||||||
|
- Encoding from any value and decoding into pointer to any value
|
||||||
|
(struct, slice, map, primitives, pointers, interface{}, etc)
|
||||||
|
- Extensions to support efficient encoding/decoding of any named types
|
||||||
|
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
|
||||||
|
  - Decoding without a schema (into an interface{}).
|
||||||
|
Includes Options to configure what specific map or slice type to use
|
||||||
|
when decoding an encoded list or map into a nil interface{}
|
||||||
|
- Encode a struct as an array, and decode struct from an array in the data stream
|
||||||
|
- Comprehensive support for anonymous fields
|
||||||
|
- Fast (no-reflection) encoding/decoding of common maps and slices
|
||||||
|
- Code-generation for faster performance.
|
||||||
|
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
|
||||||
|
- Support indefinite-length formats to enable true streaming
|
||||||
|
(for formats which support it e.g. json, cbor)
|
||||||
|
- Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
|
||||||
|
This mostly applies to maps, where iteration order is non-deterministic.
|
||||||
|
- NIL in data stream decoded as zero value
|
||||||
|
- Never silently skip data when decoding.
|
||||||
|
User decides whether to return an error or silently skip data when keys or indexes
|
||||||
|
in the data stream do not map to fields in the struct.
|
||||||
|
- Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
|
||||||
|
- Encode/Decode from/to chan types (for iterative streaming support)
|
||||||
|
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
|
||||||
|
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
|
||||||
|
- Handle unique idiosyncrasies of codecs e.g.
|
||||||
|
- For messagepack, configure how ambiguities in handling raw bytes are resolved
|
||||||
|
- For messagepack, provide rpc server/client codec to support
|
||||||
|
msgpack-rpc protocol defined at:
|
||||||
|
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
|
||||||
|
|
||||||
|
Extension Support
|
||||||
|
|
||||||
|
Users can register a function to handle the encoding or decoding of
|
||||||
|
their custom types.
|
||||||
|
|
||||||
|
There are no restrictions on what the custom type can be. Some examples:
|
||||||
|
|
||||||
|
type BisSet []int
|
||||||
|
type BitSet64 uint64
|
||||||
|
type UUID string
|
||||||
|
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
|
||||||
|
type GifImage struct { ... }
|
||||||
|
|
||||||
|
As an illustration, MyStructWithUnexportedFields would normally be
|
||||||
|
encoded as an empty map because it has no exported fields, while UUID
|
||||||
|
would be encoded as a string. However, with extension support, you can
|
||||||
|
encode any of these however you like.
|
||||||
|
|
||||||
|
RPC
|
||||||
|
|
||||||
|
RPC Client and Server Codecs are implemented, so the codecs can be used
|
||||||
|
with the standard net/rpc package.
|
||||||
|
|
||||||
|
Usage
|
||||||
|
|
||||||
|
The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
|
||||||
|
|
||||||
|
The Encoder and Decoder are NOT safe for concurrent use.
|
||||||
|
|
||||||
|
Consequently, the usage model is basically:
|
||||||
|
|
||||||
|
- Create and initialize the Handle before any use.
|
||||||
|
Once created, DO NOT modify it.
|
||||||
|
- Multiple Encoders or Decoders can now use the Handle concurrently.
|
||||||
|
They only read information off the Handle (never write).
|
||||||
|
- However, each Encoder or Decoder MUST not be used concurrently
|
||||||
|
- To re-use an Encoder/Decoder, call Reset(...) on it first.
|
||||||
|
    This allows you to use state maintained on the Encoder/Decoder.
|
||||||
|
|
||||||
|
Sample usage model:
|
||||||
|
|
||||||
|
// create and configure Handle
|
||||||
|
var (
|
||||||
|
bh codec.BincHandle
|
||||||
|
mh codec.MsgpackHandle
|
||||||
|
ch codec.CborHandle
|
||||||
|
)
|
||||||
|
|
||||||
|
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
|
||||||
|
|
||||||
|
// configure extensions
|
||||||
|
// e.g. for msgpack, define functions and enable Time support for tag 1
|
||||||
|
// mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
|
||||||
|
|
||||||
|
// create and use decoder/encoder
|
||||||
|
var (
|
||||||
|
r io.Reader
|
||||||
|
w io.Writer
|
||||||
|
b []byte
|
||||||
|
h = &bh // or mh to use msgpack
|
||||||
|
)
|
||||||
|
|
||||||
|
dec = codec.NewDecoder(r, h)
|
||||||
|
dec = codec.NewDecoderBytes(b, h)
|
||||||
|
err = dec.Decode(&v)
|
||||||
|
|
||||||
|
enc = codec.NewEncoder(w, h)
|
||||||
|
enc = codec.NewEncoderBytes(&b, h)
|
||||||
|
err = enc.Encode(v)
|
||||||
|
|
||||||
|
//RPC Server
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
conn, err := listener.Accept()
|
||||||
|
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
|
||||||
|
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
|
||||||
|
rpc.ServeCodec(rpcCodec)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
//RPC Communication (client side)
|
||||||
|
conn, err = net.Dial("tcp", "localhost:5555")
|
||||||
|
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
|
||||||
|
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
|
||||||
|
client := rpc.NewClientWithCodec(rpcCodec)
|
||||||
|
|
||||||
|
Running Tests
|
||||||
|
|
||||||
|
To run tests, use the following:
|
||||||
|
|
||||||
|
go test
|
||||||
|
|
||||||
|
To run the full suite of tests, use the following:
|
||||||
|
|
||||||
|
go test -tags alltests -run Suite
|
||||||
|
|
||||||
|
You can run the tag 'safe' to run tests or build in safe mode. e.g.
|
||||||
|
|
||||||
|
go test -tags safe -run Json
|
||||||
|
go test -tags "alltests safe" -run Suite
|
||||||
|
|
||||||
|
Running Benchmarks
|
||||||
|
|
||||||
|
Please see http://github.com/ugorji/go-codec-bench .
|
||||||
|
|
||||||
|
*/
|
||||||
|
package codec
|
||||||
|
|
202
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/LICENSE
generated
vendored
Normal file
202
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
946
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/binc.go
generated
vendored
Normal file
946
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/binc.go
generated
vendored
Normal file
@ -0,0 +1,946 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// bincDoPrune gates the stripping of redundant bytes when encoding
// floats (trailing zero bytes) and integers (sign-extension bytes).
const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning.

// vd as low 4 bits (there are 16 slots)
const (
	bincVdSpecial byte = iota
	bincVdPosInt
	bincVdNegInt
	bincVdFloat

	bincVdString
	bincVdByteArray
	bincVdArray
	bincVdMap

	bincVdTimestamp
	bincVdSmallInt
	bincVdUnicodeOther
	bincVdSymbol

	bincVdDecimal
	_ // open slot
	_ // open slot
	bincVdCustomExt = 0x0f
)

// Special one-byte values, carried in the low 4 bits alongside bincVdSpecial.
const (
	bincSpNil byte = iota
	bincSpFalse
	bincSpTrue
	bincSpNan
	bincSpPosInf
	bincSpNegInf
	bincSpZeroFloat
	bincSpZero
	bincSpNegOne
)

// Float sub-formats, carried in the low bits alongside bincVdFloat.
const (
	bincFlBin16 byte = iota
	bincFlBin32
	_ // bincFlBin32e
	bincFlBin64
	_ // bincFlBin64e
	// others not currently supported
)
||||||
|
|
||||||
|
// bincEncDriver is the encoder backend for the binc format.
type bincEncDriver struct {
	e *Encoder  // parent encoder (used here for error reporting)
	w encWriter // destination for encoded bytes
	m map[string]uint16 // symbols
	b [scratchByteArrayLen]byte // scratch buffer for fixed-width encodes
	s uint16 // symbols sequencer
	// encNoSeparator
	encDriverNoopContainerWriter
}
|
||||||
|
|
||||||
|
// IsBuiltinType reports whether rt is a type this driver encodes natively;
// only time.Time (timeTypId) qualifies.
func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool {
	return rt == timeTypId
}
|
||||||
|
|
||||||
|
// EncodeBuiltin encodes a builtin-handled value. Only time.Time (by value
// or pointer) is supported: it is serialized via encodeTime and written as
// a bincVdTimestamp record whose low nibble carries the payload length.
// Any other value type for timeTypId is reported through e.e.errorf.
func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {
	if rt == timeTypId {
		var bs []byte
		switch x := v.(type) {
		case time.Time:
			bs = encodeTime(x)
		case *time.Time:
			bs = encodeTime(*x)
		default:
			e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v)
		}
		e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
		e.w.writeb(bs)
	}
}
|
||||||
|
|
||||||
|
// EncodeNil writes the single-byte special value for nil.
func (e *bincEncDriver) EncodeNil() {
	e.w.writen1(bincVdSpecial<<4 | bincSpNil)
}
|
||||||
|
|
||||||
|
func (e *bincEncDriver) EncodeBool(b bool) {
|
||||||
|
if b {
|
||||||
|
e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
|
||||||
|
} else {
|
||||||
|
e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeFloat32 writes f as a big-endian IEEE 754 binary32, or as the
// single-byte zero-float special value when f == 0.
func (e *bincEncDriver) EncodeFloat32(f float32) {
	if f == 0 {
		e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
		return
	}
	e.w.writen1(bincVdFloat<<4 | bincFlBin32)
	bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
}
|
||||||
|
|
||||||
|
// EncodeFloat64 writes f as a big-endian IEEE 754 binary64, or the zero-float
// special value when f == 0. When bincDoPrune is set and at least two trailing
// bytes of the big-endian encoding are zero, a pruned form is written instead:
// the 0x8 bit marks pruning, followed by the retained byte count and the
// retained leading bytes.
func (e *bincEncDriver) EncodeFloat64(f float64) {
	if f == 0 {
		e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
		return
	}
	bigen.PutUint64(e.b[:8], math.Float64bits(f))
	if bincDoPrune {
		i := 7
		// scan backwards past trailing zero bytes
		for ; i >= 0 && (e.b[i] == 0); i-- {
		}
		i++ // i is now the count of leading bytes to keep
		if i <= 6 {
			e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
			e.w.writen1(byte(i))
			e.w.writeb(e.b[:i])
			return
		}
	}
	e.w.writen1(bincVdFloat<<4 | bincFlBin64)
	e.w.writeb(e.b[:8])
}
|
||||||
|
|
||||||
|
// encIntegerPrune writes v big-endian in lim (4 or 8) bytes. When bincDoPrune
// is set, redundant leading bytes (sign extension, per pruneSignExt) are
// dropped; the count of bytes actually written, minus one, is folded into the
// low bits of the descriptor byte bd.
func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
	if lim == 4 {
		bigen.PutUint32(e.b[:lim], uint32(v))
	} else {
		bigen.PutUint64(e.b[:lim], v)
	}
	if bincDoPrune {
		i := pruneSignExt(e.b[:lim], pos)
		e.w.writen1(bd | lim - 1 - byte(i))
		e.w.writeb(e.b[i:lim])
	} else {
		e.w.writen1(bd | lim - 1)
		e.w.writeb(e.b[:lim])
	}
}
|
||||||
|
|
||||||
|
func (e *bincEncDriver) EncodeInt(v int64) {
|
||||||
|
const nbd byte = bincVdNegInt << 4
|
||||||
|
if v >= 0 {
|
||||||
|
e.encUint(bincVdPosInt<<4, true, uint64(v))
|
||||||
|
} else if v == -1 {
|
||||||
|
e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
|
||||||
|
} else {
|
||||||
|
e.encUint(bincVdNegInt<<4, false, uint64(-v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeUint encodes an unsigned integer under the positive-int descriptor.
func (e *bincEncDriver) EncodeUint(v uint64) {
	e.encUint(bincVdPosInt<<4, true, v)
}
|
||||||
|
|
||||||
|
// encUint writes v under descriptor bd, choosing the smallest encoding:
// the zero special, a small-int (1..16, value-1 embedded in the low nibble),
// a 1- or 2-byte integer, or a pruned 4/8-byte integer via encIntegerPrune.
func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
	if v == 0 {
		e.w.writen1(bincVdSpecial<<4 | bincSpZero)
	} else if pos && v >= 1 && v <= 16 {
		e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
	} else if v <= math.MaxUint8 {
		e.w.writen2(bd|0x0, byte(v))
	} else if v <= math.MaxUint16 {
		e.w.writen1(bd | 0x01)
		bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
	} else if v <= math.MaxUint32 {
		e.encIntegerPrune(bd, pos, v, 4)
	} else {
		e.encIntegerPrune(bd, pos, v, 8)
	}
}
|
||||||
|
|
||||||
|
// EncodeExt encodes rv via ext.WriteExt as a custom extension with tag xtag.
// A nil byte slice from the extension encodes as nil.
func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
	bs := ext.WriteExt(rv)
	if bs == nil {
		e.EncodeNil()
		return
	}
	e.encodeExtPreamble(uint8(xtag), len(bs))
	e.w.writeb(bs)
}
|
||||||
|
|
||||||
|
// EncodeRawExt writes a pre-encoded extension: preamble (tag + length)
// followed by the raw bytes.
func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
	e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
	e.w.writeb(re.Data)
}
|
||||||
|
|
||||||
|
// encodeExtPreamble writes the custom-extension descriptor with the payload
// length, followed by the one-byte tag.
func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
	e.encLen(bincVdCustomExt<<4, uint64(length))
	e.w.writen1(xtag)
}
|
||||||
|
|
||||||
|
// WriteArrayStart writes the array descriptor with its element count.
func (e *bincEncDriver) WriteArrayStart(length int) {
	e.encLen(bincVdArray<<4, uint64(length))
}
|
||||||
|
|
||||||
|
// WriteMapStart writes the map descriptor with its entry count.
func (e *bincEncDriver) WriteMapStart(length int) {
	e.encLen(bincVdMap<<4, uint64(length))
}
|
||||||
|
|
||||||
|
func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
|
||||||
|
l := uint64(len(v))
|
||||||
|
e.encBytesLen(c, l)
|
||||||
|
if l > 0 {
|
||||||
|
e.w.writestr(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeSymbol encodes string v as a binc symbol: the first occurrence writes
// a freshly-assigned symbol id followed by the string's length and bytes;
// later occurrences write only the id. Strings of length 0 or 1 are written
// as plain strings, since a symbol reference would not be smaller.
func (e *bincEncDriver) EncodeSymbol(v string) {
	// if WriteSymbolsNoRefs {
	// 	e.encodeString(c_UTF8, v)
	// 	return
	// }

	//symbols only offer benefit when string length > 1.
	//This is because strings with length 1 take only 2 bytes to store
	//(bd with embedded length, and single byte for string val).

	l := len(v)
	if l == 0 {
		e.encBytesLen(c_UTF8, 0)
		return
	} else if l == 1 {
		e.encBytesLen(c_UTF8, 1)
		e.w.writen1(v[0])
		return
	}
	if e.m == nil {
		e.m = make(map[string]uint16, 16)
	}
	ui, ok := e.m[v]
	if ok {
		// already interned: write just the id (the 0x8 bit flags a 2-byte id)
		if ui <= math.MaxUint8 {
			e.w.writen2(bincVdSymbol<<4, byte(ui))
		} else {
			e.w.writen1(bincVdSymbol<<4 | 0x8)
			bigenHelper{e.b[:2], e.w}.writeUint16(ui)
		}
	} else {
		// first occurrence: assign the next id, then write id + length + bytes
		e.s++
		ui = e.s
		//ui = uint16(atomic.AddUint32(&e.s, 1))
		e.m[v] = ui
		// lenprec selects the width of the length field: 0,1,2,3 -> 1,2,4,8 bytes
		var lenprec uint8
		if l <= math.MaxUint8 {
			// lenprec = 0
		} else if l <= math.MaxUint16 {
			lenprec = 1
		} else if int64(l) <= math.MaxUint32 {
			lenprec = 2
		} else {
			lenprec = 3
		}
		// the 0x4 bit flags that the string value follows the id
		if ui <= math.MaxUint8 {
			e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
		} else {
			e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
			bigenHelper{e.b[:2], e.w}.writeUint16(ui)
		}
		if lenprec == 0 {
			e.w.writen1(byte(l))
		} else if lenprec == 1 {
			bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l))
		} else if lenprec == 2 {
			bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l))
		} else {
			bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l))
		}
		e.w.writestr(v)
	}
}
|
||||||
|
|
||||||
|
func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
|
||||||
|
l := uint64(len(v))
|
||||||
|
e.encBytesLen(c, l)
|
||||||
|
if l > 0 {
|
||||||
|
e.w.writeb(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
|
||||||
|
//TODO: support bincUnicodeOther (for now, just use string or bytearray)
|
||||||
|
if c == c_RAW {
|
||||||
|
e.encLen(bincVdByteArray<<4, length)
|
||||||
|
} else {
|
||||||
|
e.encLen(bincVdString<<4, length)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *bincEncDriver) encLen(bd byte, l uint64) {
|
||||||
|
if l < 12 {
|
||||||
|
e.w.writen1(bd | uint8(l+4))
|
||||||
|
} else {
|
||||||
|
e.encLenNumber(bd, l)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// encLenNumber writes length v after descriptor bd, using the smallest of
// 1/2/4/8 big-endian bytes; the width code (0..3) goes in the low bits of bd.
func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
	if v <= math.MaxUint8 {
		e.w.writen2(bd, byte(v))
	} else if v <= math.MaxUint16 {
		e.w.writen1(bd | 0x01)
		bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
	} else if v <= math.MaxUint32 {
		e.w.writen1(bd | 0x02)
		bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
	} else {
		e.w.writen1(bd | 0x03)
		bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v))
	}
}
|
||||||
|
|
||||||
|
//------------------------------------
|
||||||
|
|
||||||
|
// bincDecSymbol is one entry in the decoder's symbol table.
type bincDecSymbol struct {
	s string // string form, filled lazily from b
	b []byte // raw bytes of the symbol's value
	i uint16 // symbol id
}
|
||||||
|
|
||||||
|
// bincDecDriver implements decDriver for the Binc format.
type bincDecDriver struct {
	d      *Decoder
	h      *BincHandle
	r      decReader
	br     bool // bytes reader
	bdRead bool // whether bd/vd/vs hold the current, unconsumed descriptor
	bd     byte // current descriptor byte
	vd     byte // high nibble of bd (value descriptor)
	vs     byte // low nibble of bd (subtype / embedded data)
	// noStreamingCodec
	// decNoSeparator
	b [scratchByteArrayLen]byte

	// linear searching on this slice is ok,
	// because we typically expect < 32 symbols in each stream.
	s []bincDecSymbol
	decDriverNoopContainerReader
}
|
||||||
|
|
||||||
|
// readNextBd reads the next descriptor byte and splits it into vd (high
// nibble) and vs (low nibble).
func (d *bincDecDriver) readNextBd() {
	d.bd = d.r.readn1()
	d.vd = d.bd >> 4
	d.vs = d.bd & 0x0f
	d.bdRead = true
}
|
||||||
|
|
||||||
|
func (d *bincDecDriver) uncacheRead() {
|
||||||
|
if d.bdRead {
|
||||||
|
d.r.unreadn1()
|
||||||
|
d.bdRead = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *bincDecDriver) ContainerType() (vt valueType) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.vd == bincVdSpecial && d.vs == bincSpNil {
|
||||||
|
return valueTypeNil
|
||||||
|
} else if d.vd == bincVdByteArray {
|
||||||
|
return valueTypeBytes
|
||||||
|
} else if d.vd == bincVdString {
|
||||||
|
return valueTypeString
|
||||||
|
} else if d.vd == bincVdArray {
|
||||||
|
return valueTypeArray
|
||||||
|
} else if d.vd == bincVdMap {
|
||||||
|
return valueTypeMap
|
||||||
|
} else {
|
||||||
|
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
|
||||||
|
}
|
||||||
|
return valueTypeUnset
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *bincDecDriver) TryDecodeAsNil() bool {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == bincVdSpecial<<4|bincSpNil {
|
||||||
|
d.bdRead = false
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsBuiltinType reports whether rt is natively handled by this driver
// (only time.Time).
func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool {
	return rt == timeTypId
}
|
||||||
|
|
||||||
|
// DecodeBuiltin decodes a builtin value. Only time.Time is supported: the
// timestamp's byte count is in d.vs, and v must be a *time.Time to receive
// the result.
func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {
	if !d.bdRead {
		d.readNextBd()
	}
	if rt == timeTypId {
		if d.vd != bincVdTimestamp {
			d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
			return
		}
		tt, err := decodeTime(d.r.readx(int(d.vs)))
		if err != nil {
			panic(err)
		}
		var vt *time.Time = v.(*time.Time)
		*vt = tt
		d.bdRead = false
	}
}
|
||||||
|
|
||||||
|
// decFloatPre reads a float's bytes into d.b. If the 0x8 (prune) bit of vs is
// clear, defaultLen bytes are read directly; otherwise a byte count (<= 8)
// precedes the data, and d.b's remaining high-index bytes are zero-filled to
// reconstitute the full-width big-endian value.
func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
	if vs&0x8 == 0 {
		d.r.readb(d.b[0:defaultLen])
	} else {
		l := d.r.readn1()
		if l > 8 {
			d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l)
			return
		}
		for i := l; i < 8; i++ {
			d.b[i] = 0
		}
		d.r.readb(d.b[0:l])
	}
}
|
||||||
|
|
||||||
|
// decFloat decodes a binary32 or binary64 payload (selected by the low 3 bits
// of d.vs) into a float64. Other float subtypes are errors.
func (d *bincDecDriver) decFloat() (f float64) {
	//if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; }
	if x := d.vs & 0x7; x == bincFlBin32 {
		d.decFloatPre(d.vs, 4)
		f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
	} else if x == bincFlBin64 {
		d.decFloatPre(d.vs, 8)
		f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
	} else {
		d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs)
		return
	}
	return
}
|
||||||
|
|
||||||
|
// decUint reads an unsigned integer whose byte width is encoded in d.vs
// (0..7 => 1..8 big-endian bytes). Widths that are not a power of two are
// read into the tail of the scratch buffer with the leading bytes zeroed
// before conversion.
func (d *bincDecDriver) decUint() (v uint64) {
	// need to inline the code (interface conversion and type assertion expensive)
	switch d.vs {
	case 0:
		v = uint64(d.r.readn1())
	case 1:
		d.r.readb(d.b[6:8])
		v = uint64(bigen.Uint16(d.b[6:8]))
	case 2:
		d.b[4] = 0
		d.r.readb(d.b[5:8])
		v = uint64(bigen.Uint32(d.b[4:8]))
	case 3:
		d.r.readb(d.b[4:8])
		v = uint64(bigen.Uint32(d.b[4:8]))
	case 4, 5, 6:
		lim := int(7 - d.vs)
		d.r.readb(d.b[lim:8])
		for i := 0; i < lim; i++ {
			d.b[i] = 0
		}
		v = uint64(bigen.Uint64(d.b[:8]))
	case 7:
		d.r.readb(d.b[:8])
		v = uint64(bigen.Uint64(d.b[:8]))
	default:
		d.d.errorf("unsigned integers with greater than 64 bits of precision not supported")
		return
	}
	return
}
|
||||||
|
|
||||||
|
// decCheckInteger reads the magnitude of the next integer value and reports
// whether it is negative. Accepted descriptors: positive/negative int,
// small-int (value 1..16 embedded in vs), and the zero / -1 specials.
func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
	if !d.bdRead {
		d.readNextBd()
	}
	vd, vs := d.vd, d.vs
	if vd == bincVdPosInt {
		ui = d.decUint()
	} else if vd == bincVdNegInt {
		ui = d.decUint()
		neg = true
	} else if vd == bincVdSmallInt {
		ui = uint64(d.vs) + 1
	} else if vd == bincVdSpecial {
		if vs == bincSpZero {
			//i = 0
		} else if vs == bincSpNegOne {
			neg = true
			ui = 1
		} else {
			d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs)
			return
		}
	} else {
		d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
		return
	}
	return
}
|
||||||
|
|
||||||
|
func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) {
|
||||||
|
ui, neg := d.decCheckInteger()
|
||||||
|
i, overflow := chkOvf.SignedInt(ui)
|
||||||
|
if overflow {
|
||||||
|
d.d.errorf("simple: overflow converting %v to signed integer", ui)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if neg {
|
||||||
|
i = -i
|
||||||
|
}
|
||||||
|
if chkOvf.Int(i, bitsize) {
|
||||||
|
d.d.errorf("binc: overflow integer: %v for num bits: %v", i, bitsize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeUint decodes an unsigned integer, rejecting negatively-encoded values
// and values that overflow the requested bitsize.
func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
	ui, neg := d.decCheckInteger()
	if neg {
		d.d.errorf("Assigning negative signed value to unsigned type")
		return
	}
	if chkOvf.Uint(ui, bitsize) {
		d.d.errorf("binc: overflow integer: %v", ui)
		return
	}
	d.bdRead = false
	return
}
|
||||||
|
|
||||||
|
// DecodeFloat decodes a float64 from a float value, a special value
// (NaN / +Inf / -Inf / zero), or — as a fallback — an integer value.
// When chkOverflow32 is set, values that do not fit a float32 are errors.
func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
	if !d.bdRead {
		d.readNextBd()
	}
	vd, vs := d.vd, d.vs
	if vd == bincVdSpecial {
		d.bdRead = false
		if vs == bincSpNan {
			return math.NaN()
		} else if vs == bincSpPosInf {
			return math.Inf(1)
		} else if vs == bincSpZeroFloat || vs == bincSpZero {
			return
		} else if vs == bincSpNegInf {
			return math.Inf(-1)
		} else {
			d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
			return
		}
	} else if vd == bincVdFloat {
		f = d.decFloat()
	} else {
		// not a float on the wire: decode as an integer and convert
		f = float64(d.DecodeInt(64))
	}
	if chkOverflow32 && chkOvf.Float32(f) {
		d.d.errorf("binc: float32 overflow: %v", f)
		return
	}
	d.bdRead = false
	return
}
|
||||||
|
|
||||||
|
// bool can be decoded from bool only (single byte).
|
||||||
|
func (d *bincDecDriver) DecodeBool() (b bool) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) {
|
||||||
|
// b = false
|
||||||
|
} else if bd == (bincVdSpecial | bincSpTrue) {
|
||||||
|
b = true
|
||||||
|
} else {
|
||||||
|
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadMapStart consumes a map descriptor and returns the entry count.
func (d *bincDecDriver) ReadMapStart() (length int) {
	if !d.bdRead {
		d.readNextBd()
	}
	if d.vd != bincVdMap {
		d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
		return
	}
	length = d.decLen()
	d.bdRead = false
	return
}
|
||||||
|
|
||||||
|
// ReadArrayStart consumes an array descriptor and returns the element count.
func (d *bincDecDriver) ReadArrayStart() (length int) {
	if !d.bdRead {
		d.readNextBd()
	}
	if d.vd != bincVdArray {
		d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
		return
	}
	length = d.decLen()
	d.bdRead = false
	return
}
|
||||||
|
|
||||||
|
func (d *bincDecDriver) decLen() int {
|
||||||
|
if d.vs > 3 {
|
||||||
|
return int(d.vs - 4)
|
||||||
|
}
|
||||||
|
return int(d.decLenNumber())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *bincDecDriver) decLenNumber() (v uint64) {
|
||||||
|
if x := d.vs; x == 0 {
|
||||||
|
v = uint64(d.r.readn1())
|
||||||
|
} else if x == 1 {
|
||||||
|
d.r.readb(d.b[6:8])
|
||||||
|
v = uint64(bigen.Uint16(d.b[6:8]))
|
||||||
|
} else if x == 2 {
|
||||||
|
d.r.readb(d.b[4:8])
|
||||||
|
v = uint64(bigen.Uint32(d.b[4:8]))
|
||||||
|
} else {
|
||||||
|
d.r.readb(d.b[:8])
|
||||||
|
v = bigen.Uint64(d.b[:8])
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// decStringAndBytes decodes the next string / byte-array / symbol value,
// returning its bytes and (when withString is set) its string form.
// For plain strings and byte arrays, zerocopy may return a view into the
// reader's buffer (when d.br) or reuse the scratch buffer. Symbols are
// resolved through — and on first occurrence added to — the table d.s.
func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) {
	if !d.bdRead {
		d.readNextBd()
	}
	if d.bd == bincVdSpecial<<4|bincSpNil {
		d.bdRead = false
		return
	}
	var slen int = -1
	// var ok bool
	switch d.vd {
	case bincVdString, bincVdByteArray:
		slen = d.decLen()
		if zerocopy {
			if d.br {
				bs2 = d.r.readx(slen)
			} else if len(bs) == 0 {
				bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:])
			} else {
				bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
			}
		} else {
			bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
		}
		if withString {
			s = string(bs2)
		}
	case bincVdSymbol:
		// zerocopy doesn't apply for symbols,
		// as the values must be stored in a table for later use.
		//
		//from vs: extract numSymbolBytes, containsStringVal, strLenPrecision,
		//extract symbol
		//if containsStringVal, read it and put in map
		//else look in map for string value
		var symbol uint16
		vs := d.vs
		// the 0x8 bit flags a 2-byte symbol id
		if vs&0x8 == 0 {
			symbol = uint16(d.r.readn1())
		} else {
			symbol = uint16(bigen.Uint16(d.r.readx(2)))
		}
		if d.s == nil {
			d.s = make([]bincDecSymbol, 0, 16)
		}

		// the 0x4 bit flags that the string value follows the id
		if vs&0x4 == 0 {
			// reference to an already-seen symbol: linear lookup in the table
			for i := range d.s {
				j := &d.s[i]
				if j.i == symbol {
					bs2 = j.b
					if withString {
						if j.s == "" && bs2 != nil {
							j.s = string(bs2) // cache string form lazily
						}
						s = j.s
					}
					break
				}
			}
		} else {
			// first occurrence: length width is in the low 2 bits of vs
			switch vs & 0x3 {
			case 0:
				slen = int(d.r.readn1())
			case 1:
				slen = int(bigen.Uint16(d.r.readx(2)))
			case 2:
				slen = int(bigen.Uint32(d.r.readx(4)))
			case 3:
				slen = int(bigen.Uint64(d.r.readx(8)))
			}
			// since using symbols, do not store any part of
			// the parameter bs in the map, as it might be a shared buffer.
			// bs2 = decByteSlice(d.r, slen, bs)
			bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil)
			if withString {
				s = string(bs2)
			}
			d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
		}
	default:
		d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
			bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
		return
	}
	d.bdRead = false
	return
}
|
||||||
|
|
||||||
|
// DecodeString decodes the next value as a string (symbols included).
func (d *bincDecDriver) DecodeString() (s string) {
	// DecodeBytes does not accommodate symbols, whose impl stores string version in map.
	// Use decStringAndBytes directly.
	// return string(d.DecodeBytes(d.b[:], true, true))
	_, s = d.decStringAndBytes(d.b[:], true, true)
	return
}
|
||||||
|
|
||||||
|
// DecodeStringAsBytes decodes the next string value, returning its bytes
// without materializing a string.
func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) {
	s, _ = d.decStringAndBytes(d.b[:], false, true)
	return
}
|
||||||
|
|
||||||
|
// DecodeBytes decodes a string or byte-array value into bs (or, with
// zerocopy and a bytes reader, returns a view into the reader's buffer).
// A nil special value returns nil. Symbols are NOT handled here — see
// decStringAndBytes.
func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
	if !d.bdRead {
		d.readNextBd()
	}
	if d.bd == bincVdSpecial<<4|bincSpNil {
		d.bdRead = false
		return nil
	}
	var clen int
	if d.vd == bincVdString || d.vd == bincVdByteArray {
		clen = d.decLen()
	} else {
		d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x",
			bincVdString, bincVdByteArray, d.vd)
		return
	}
	d.bdRead = false
	if zerocopy {
		if d.br {
			return d.r.readx(clen)
		} else if len(bs) == 0 {
			bs = d.b[:] // fall back to the scratch buffer
		}
	}
	return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
|
||||||
|
|
||||||
|
// DecodeExt decodes an extension value. With a nil ext, rv must be a *RawExt
// which receives the tag and (detached) raw bytes; otherwise the bytes are
// handed to ext.ReadExt. Tags above 0xff are not representable in binc.
func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
	if xtag > 0xff {
		d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
		return
	}
	realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
	realxtag = uint64(realxtag1)
	if ext == nil {
		re := rv.(*RawExt)
		re.Tag = realxtag
		re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
	} else {
		ext.ReadExt(rv, xbs)
	}
	return
}
|
||||||
|
|
||||||
|
// decodeExtV reads an extension's tag and payload bytes. A custom-ext value
// is checked against the expected tag (when verifyTag); a byte-array value is
// accepted as a tagless payload.
func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
	if !d.bdRead {
		d.readNextBd()
	}
	if d.vd == bincVdCustomExt {
		l := d.decLen()
		xtag = d.r.readn1()
		if verifyTag && xtag != tag {
			d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
			return
		}
		xbs = d.r.readx(l)
	} else if d.vd == bincVdByteArray {
		xbs = d.DecodeBytes(nil, true)
	} else {
		d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
		return
	}
	d.bdRead = false
	return
}
|
||||||
|
|
||||||
|
// DecodeNaked decodes the next value without a target type, populating the
// decoder's naked-value holder (d.d.n) with the value kind and payload.
// Arrays and maps only set the kind; the caller decodes their contents.
func (d *bincDecDriver) DecodeNaked() {
	if !d.bdRead {
		d.readNextBd()
	}

	n := d.d.n
	var decodeFurther bool

	switch d.vd {
	case bincVdSpecial:
		switch d.vs {
		case bincSpNil:
			n.v = valueTypeNil
		case bincSpFalse:
			n.v = valueTypeBool
			n.b = false
		case bincSpTrue:
			n.v = valueTypeBool
			n.b = true
		case bincSpNan:
			n.v = valueTypeFloat
			n.f = math.NaN()
		case bincSpPosInf:
			n.v = valueTypeFloat
			n.f = math.Inf(1)
		case bincSpNegInf:
			n.v = valueTypeFloat
			n.f = math.Inf(-1)
		case bincSpZeroFloat:
			n.v = valueTypeFloat
			n.f = float64(0)
		case bincSpZero:
			n.v = valueTypeUint
			n.u = uint64(0) // int8(0)
		case bincSpNegOne:
			n.v = valueTypeInt
			n.i = int64(-1) // int8(-1)
		default:
			d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs)
		}
	case bincVdSmallInt:
		n.v = valueTypeUint
		n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
	case bincVdPosInt:
		n.v = valueTypeUint
		n.u = d.decUint()
	case bincVdNegInt:
		n.v = valueTypeInt
		n.i = -(int64(d.decUint()))
	case bincVdFloat:
		n.v = valueTypeFloat
		n.f = d.decFloat()
	case bincVdSymbol:
		n.v = valueTypeSymbol
		n.s = d.DecodeString()
	case bincVdString:
		n.v = valueTypeString
		n.s = d.DecodeString()
	case bincVdByteArray:
		n.v = valueTypeBytes
		n.l = d.DecodeBytes(nil, false)
	case bincVdTimestamp:
		n.v = valueTypeTimestamp
		tt, err := decodeTime(d.r.readx(int(d.vs)))
		if err != nil {
			panic(err)
		}
		n.t = tt
	case bincVdCustomExt:
		n.v = valueTypeExt
		l := d.decLen()
		n.u = uint64(d.r.readn1()) // the extension tag
		n.l = d.r.readx(l)
	case bincVdArray:
		n.v = valueTypeArray
		decodeFurther = true
	case bincVdMap:
		n.v = valueTypeMap
		decodeFurther = true
	default:
		d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
	}

	if !decodeFurther {
		d.bdRead = false
	}
	// honor the handle's preference for signed integers
	if n.v == valueTypeUint && d.h.SignedInteger {
		n.v = valueTypeInt
		n.i = int64(n.u)
	}
	return
}
|
||||||
|
|
||||||
|
//------------------------------------
|
||||||
|
|
||||||
|
// BincHandle is a Handle for the Binc Schema-Free Encoding Format
// defined at https://github.com/ugorji/binc .
//
// BincHandle currently supports all Binc features with the following EXCEPTIONS:
//   - only integers up to 64 bits of precision are supported.
//     big integers are unsupported.
//   - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
//     extended precision and decimal IEEE 754 floats are unsupported.
//   - Only UTF-8 strings supported.
//     Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
//
// Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
type BincHandle struct {
	BasicHandle
	binaryEncodingType
	noElemSeparators
}
|
||||||
|
|
||||||
|
// SetBytesExt registers a bytes-based extension for type rt under tag.
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
	return h.SetExt(rt, tag, &setExtWrapper{b: ext})
}
|
||||||
|
|
||||||
|
// newEncDriver returns a fresh binc encode driver bound to e.
func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
	return &bincEncDriver{e: e, w: e.w}
}
|
||||||
|
|
||||||
|
// newDecDriver returns a fresh binc decode driver bound to d.
func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
	return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes}
}
|
||||||
|
|
||||||
|
// IsBuiltinType reports whether rt is natively handled by this handle
// (only time.Time).
func (_ *BincHandle) IsBuiltinType(rt uintptr) bool {
	return rt == timeTypId
}
|
||||||
|
|
||||||
|
// reset re-binds the driver to its encoder's writer and clears the symbol
// table and sequencer for reuse.
func (e *bincEncDriver) reset() {
	e.w = e.e.w
	e.s = 0
	e.m = nil
}
|
||||||
|
|
||||||
|
// reset re-binds the driver to its decoder's reader and clears the symbol
// table and cached descriptor state for reuse.
func (d *bincDecDriver) reset() {
	d.r, d.br = d.d.r, d.d.bytes
	d.s = nil
	d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
}
|
||||||
|
|
||||||
|
// compile-time interface conformance checks
var _ decDriver = (*bincDecDriver)(nil)
var _ encDriver = (*bincEncDriver)(nil)
|
631
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/cbor.go
generated
vendored
Normal file
631
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/cbor.go
generated
vendored
Normal file
@ -0,0 +1,631 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CBOR major types (see the cborBaseXxx constants for their descriptor-byte
// base values).
const (
	cborMajorUint byte = iota
	cborMajorNegInt
	cborMajorBytes
	cborMajorText
	cborMajorArray
	cborMajorMap
	cborMajorTag
	cborMajorOther
)
|
||||||
|
|
||||||
|
// Single-byte descriptors for simple values and floats (0xf4..0xfb).
const (
	cborBdFalse byte = 0xf4 + iota
	cborBdTrue
	cborBdNil
	cborBdUndefined
	cborBdExt
	cborBdFloat16
	cborBdFloat32
	cborBdFloat64
)
|
||||||
|
|
||||||
|
// Descriptors for indefinite-length items and the break terminator.
const (
	cborBdIndefiniteBytes  byte = 0x5f
	cborBdIndefiniteString      = 0x7f
	cborBdIndefiniteArray       = 0x9f
	cborBdIndefiniteMap         = 0xbf
	cborBdBreak                 = 0xff
)
|
||||||
|
|
||||||
|
// Exported aliases of the indefinite-length descriptors above.
const (
	CborStreamBytes  byte = 0x5f
	CborStreamString      = 0x7f
	CborStreamArray       = 0x9f
	CborStreamMap         = 0xbf
	CborStreamBreak       = 0xff
)
|
||||||
|
|
||||||
|
// Base descriptor byte for each major type (the major type occupies the high
// 3 bits; counts/values are folded into the low 5 bits by encUint).
const (
	cborBaseUint   byte = 0x00
	cborBaseNegInt      = 0x20
	cborBaseBytes       = 0x40
	cborBaseString      = 0x60
	cborBaseArray       = 0x80
	cborBaseMap         = 0xa0
	cborBaseTag         = 0xc0
	cborBaseSimple      = 0xe0
)
|
||||||
|
|
||||||
|
// -------------------
|
||||||
|
|
||||||
|
// cborEncDriver implements encDriver for the CBOR format.
type cborEncDriver struct {
	noBuiltInTypes
	encDriverNoopContainerWriter
	// encNoSeparator
	e *Encoder
	w encWriter
	h *CborHandle
	x [8]byte // scratch buffer for big-endian writes
}
|
||||||
|
|
||||||
|
// EncodeNil writes the CBOR nil descriptor byte.
func (e *cborEncDriver) EncodeNil() {
	e.w.writen1(cborBdNil)
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) EncodeBool(b bool) {
|
||||||
|
if b {
|
||||||
|
e.w.writen1(cborBdTrue)
|
||||||
|
} else {
|
||||||
|
e.w.writen1(cborBdFalse)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeFloat32 writes f as a big-endian IEEE 754 binary32.
func (e *cborEncDriver) EncodeFloat32(f float32) {
	e.w.writen1(cborBdFloat32)
	bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
}
|
||||||
|
|
||||||
|
// EncodeFloat64 writes f as a big-endian IEEE 754 binary64.
func (e *cborEncDriver) EncodeFloat64(f float64) {
	e.w.writen1(cborBdFloat64)
	bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
}
|
||||||
|
|
||||||
|
// encUint writes v under base descriptor bd in the shortest CBOR form:
// immediate (0..0x17 folded into the descriptor) or 1/2/4/8 big-endian bytes
// with additional-info codes 0x18..0x1b.
func (e *cborEncDriver) encUint(v uint64, bd byte) {
	if v <= 0x17 {
		e.w.writen1(byte(v) + bd)
	} else if v <= math.MaxUint8 {
		e.w.writen2(bd+0x18, uint8(v))
	} else if v <= math.MaxUint16 {
		e.w.writen1(bd + 0x19)
		bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
	} else if v <= math.MaxUint32 {
		e.w.writen1(bd + 0x1a)
		bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
	} else { // if v <= math.MaxUint64 {
		e.w.writen1(bd + 0x1b)
		bigenHelper{e.x[:8], e.w}.writeUint64(v)
	}
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) EncodeInt(v int64) {
|
||||||
|
if v < 0 {
|
||||||
|
e.encUint(uint64(-1-v), cborBaseNegInt)
|
||||||
|
} else {
|
||||||
|
e.encUint(uint64(v), cborBaseUint)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeUint encodes an unsigned integer under the uint major type.
func (e *cborEncDriver) EncodeUint(v uint64) {
	e.encUint(v, cborBaseUint)
}
|
||||||
|
|
||||||
|
// encLen writes a container/string length under base descriptor bd.
func (e *cborEncDriver) encLen(bd byte, length int) {
	e.encUint(uint64(length), bd)
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
|
||||||
|
e.encUint(uint64(xtag), cborBaseTag)
|
||||||
|
if v := ext.ConvertExt(rv); v == nil {
|
||||||
|
e.EncodeNil()
|
||||||
|
} else {
|
||||||
|
en.encode(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
|
||||||
|
e.encUint(uint64(re.Tag), cborBaseTag)
|
||||||
|
if false && re.Data != nil {
|
||||||
|
en.encode(re.Data)
|
||||||
|
} else if re.Value != nil {
|
||||||
|
en.encode(re.Value)
|
||||||
|
} else {
|
||||||
|
e.EncodeNil()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) WriteArrayStart(length int) {
|
||||||
|
if e.h.IndefiniteLength {
|
||||||
|
e.w.writen1(cborBdIndefiniteArray)
|
||||||
|
} else {
|
||||||
|
e.encLen(cborBaseArray, length)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) WriteMapStart(length int) {
|
||||||
|
if e.h.IndefiniteLength {
|
||||||
|
e.w.writen1(cborBdIndefiniteMap)
|
||||||
|
} else {
|
||||||
|
e.encLen(cborBaseMap, length)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) WriteMapEnd() {
|
||||||
|
if e.h.IndefiniteLength {
|
||||||
|
e.w.writen1(cborBdBreak)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) WriteArrayEnd() {
|
||||||
|
if e.h.IndefiniteLength {
|
||||||
|
e.w.writen1(cborBdBreak)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
|
||||||
|
e.encLen(cborBaseString, len(v))
|
||||||
|
e.w.writestr(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) EncodeSymbol(v string) {
|
||||||
|
e.EncodeString(c_UTF8, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
|
||||||
|
if c == c_RAW {
|
||||||
|
e.encLen(cborBaseBytes, len(v))
|
||||||
|
} else {
|
||||||
|
e.encLen(cborBaseString, len(v))
|
||||||
|
}
|
||||||
|
e.w.writeb(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------------------
|
||||||
|
|
||||||
|
type cborDecDriver struct {
|
||||||
|
d *Decoder
|
||||||
|
h *CborHandle
|
||||||
|
r decReader
|
||||||
|
b [scratchByteArrayLen]byte
|
||||||
|
br bool // bytes reader
|
||||||
|
bdRead bool
|
||||||
|
bd byte
|
||||||
|
noBuiltInTypes
|
||||||
|
// decNoSeparator
|
||||||
|
decDriverNoopContainerReader
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) readNextBd() {
|
||||||
|
d.bd = d.r.readn1()
|
||||||
|
d.bdRead = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) uncacheRead() {
|
||||||
|
if d.bdRead {
|
||||||
|
d.r.unreadn1()
|
||||||
|
d.bdRead = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) ContainerType() (vt valueType) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == cborBdNil {
|
||||||
|
return valueTypeNil
|
||||||
|
} else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
|
||||||
|
return valueTypeBytes
|
||||||
|
} else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
|
||||||
|
return valueTypeString
|
||||||
|
} else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
|
||||||
|
return valueTypeArray
|
||||||
|
} else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
|
||||||
|
return valueTypeMap
|
||||||
|
} else {
|
||||||
|
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
|
||||||
|
}
|
||||||
|
return valueTypeUnset
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) TryDecodeAsNil() bool {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
// treat Nil and Undefined as nil values
|
||||||
|
if d.bd == cborBdNil || d.bd == cborBdUndefined {
|
||||||
|
d.bdRead = false
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) CheckBreak() bool {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == cborBdBreak {
|
||||||
|
d.bdRead = false
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) decUint() (ui uint64) {
|
||||||
|
v := d.bd & 0x1f
|
||||||
|
if v <= 0x17 {
|
||||||
|
ui = uint64(v)
|
||||||
|
} else {
|
||||||
|
if v == 0x18 {
|
||||||
|
ui = uint64(d.r.readn1())
|
||||||
|
} else if v == 0x19 {
|
||||||
|
ui = uint64(bigen.Uint16(d.r.readx(2)))
|
||||||
|
} else if v == 0x1a {
|
||||||
|
ui = uint64(bigen.Uint32(d.r.readx(4)))
|
||||||
|
} else if v == 0x1b {
|
||||||
|
ui = uint64(bigen.Uint64(d.r.readx(8)))
|
||||||
|
} else {
|
||||||
|
d.d.errorf("decUint: Invalid descriptor: %v", d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) decCheckInteger() (neg bool) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
major := d.bd >> 5
|
||||||
|
if major == cborMajorUint {
|
||||||
|
} else if major == cborMajorNegInt {
|
||||||
|
neg = true
|
||||||
|
} else {
|
||||||
|
d.d.errorf("invalid major: %v (bd: %v)", major, d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) {
|
||||||
|
neg := d.decCheckInteger()
|
||||||
|
ui := d.decUint()
|
||||||
|
// check if this number can be converted to an int without overflow
|
||||||
|
var overflow bool
|
||||||
|
if neg {
|
||||||
|
if i, overflow = chkOvf.SignedInt(ui + 1); overflow {
|
||||||
|
d.d.errorf("cbor: overflow converting %v to signed integer", ui+1)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
i = -i
|
||||||
|
} else {
|
||||||
|
if i, overflow = chkOvf.SignedInt(ui); overflow {
|
||||||
|
d.d.errorf("cbor: overflow converting %v to signed integer", ui)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if chkOvf.Int(i, bitsize) {
|
||||||
|
d.d.errorf("cbor: overflow integer: %v", i)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
|
||||||
|
if d.decCheckInteger() {
|
||||||
|
d.d.errorf("Assigning negative signed value to unsigned type")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ui = d.decUint()
|
||||||
|
if chkOvf.Uint(ui, bitsize) {
|
||||||
|
d.d.errorf("cbor: overflow integer: %v", ui)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if bd := d.bd; bd == cborBdFloat16 {
|
||||||
|
f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
|
||||||
|
} else if bd == cborBdFloat32 {
|
||||||
|
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
|
||||||
|
} else if bd == cborBdFloat64 {
|
||||||
|
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
|
||||||
|
} else if bd >= cborBaseUint && bd < cborBaseBytes {
|
||||||
|
f = float64(d.DecodeInt(64))
|
||||||
|
} else {
|
||||||
|
d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if chkOverflow32 && chkOvf.Float32(f) {
|
||||||
|
d.d.errorf("cbor: float32 overflow: %v", f)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// bool can be decoded from bool only (single byte).
|
||||||
|
func (d *cborDecDriver) DecodeBool() (b bool) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if bd := d.bd; bd == cborBdTrue {
|
||||||
|
b = true
|
||||||
|
} else if bd == cborBdFalse {
|
||||||
|
} else {
|
||||||
|
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) ReadMapStart() (length int) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
if d.bd == cborBdIndefiniteMap {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return d.decLen()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) ReadArrayStart() (length int) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
if d.bd == cborBdIndefiniteArray {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return d.decLen()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) decLen() int {
|
||||||
|
return int(d.decUint())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
|
||||||
|
d.bdRead = false
|
||||||
|
for {
|
||||||
|
if d.CheckBreak() {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
|
||||||
|
d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
n := d.decLen()
|
||||||
|
oldLen := len(bs)
|
||||||
|
newLen := oldLen + n
|
||||||
|
if newLen > cap(bs) {
|
||||||
|
bs2 := make([]byte, newLen, 2*cap(bs)+n)
|
||||||
|
copy(bs2, bs)
|
||||||
|
bs = bs2
|
||||||
|
} else {
|
||||||
|
bs = bs[:newLen]
|
||||||
|
}
|
||||||
|
d.r.readb(bs[oldLen:newLen])
|
||||||
|
// bs = append(bs, d.r.readn()...)
|
||||||
|
d.bdRead = false
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return bs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == cborBdNil || d.bd == cborBdUndefined {
|
||||||
|
d.bdRead = false
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
|
||||||
|
if bs == nil {
|
||||||
|
return d.decAppendIndefiniteBytes(nil)
|
||||||
|
}
|
||||||
|
return d.decAppendIndefiniteBytes(bs[:0])
|
||||||
|
}
|
||||||
|
clen := d.decLen()
|
||||||
|
d.bdRead = false
|
||||||
|
if zerocopy {
|
||||||
|
if d.br {
|
||||||
|
return d.r.readx(clen)
|
||||||
|
} else if len(bs) == 0 {
|
||||||
|
bs = d.b[:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) DecodeString() (s string) {
|
||||||
|
return string(d.DecodeBytes(d.b[:], true))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) {
|
||||||
|
return d.DecodeBytes(d.b[:], true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
u := d.decUint()
|
||||||
|
d.bdRead = false
|
||||||
|
realxtag = u
|
||||||
|
if ext == nil {
|
||||||
|
re := rv.(*RawExt)
|
||||||
|
re.Tag = realxtag
|
||||||
|
d.d.decode(&re.Value)
|
||||||
|
} else if xtag != realxtag {
|
||||||
|
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
var v interface{}
|
||||||
|
d.d.decode(&v)
|
||||||
|
ext.UpdateExt(rv, v)
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) DecodeNaked() {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
|
||||||
|
n := d.d.n
|
||||||
|
var decodeFurther bool
|
||||||
|
|
||||||
|
switch d.bd {
|
||||||
|
case cborBdNil:
|
||||||
|
n.v = valueTypeNil
|
||||||
|
case cborBdFalse:
|
||||||
|
n.v = valueTypeBool
|
||||||
|
n.b = false
|
||||||
|
case cborBdTrue:
|
||||||
|
n.v = valueTypeBool
|
||||||
|
n.b = true
|
||||||
|
case cborBdFloat16, cborBdFloat32:
|
||||||
|
n.v = valueTypeFloat
|
||||||
|
n.f = d.DecodeFloat(true)
|
||||||
|
case cborBdFloat64:
|
||||||
|
n.v = valueTypeFloat
|
||||||
|
n.f = d.DecodeFloat(false)
|
||||||
|
case cborBdIndefiniteBytes:
|
||||||
|
n.v = valueTypeBytes
|
||||||
|
n.l = d.DecodeBytes(nil, false)
|
||||||
|
case cborBdIndefiniteString:
|
||||||
|
n.v = valueTypeString
|
||||||
|
n.s = d.DecodeString()
|
||||||
|
case cborBdIndefiniteArray:
|
||||||
|
n.v = valueTypeArray
|
||||||
|
decodeFurther = true
|
||||||
|
case cborBdIndefiniteMap:
|
||||||
|
n.v = valueTypeMap
|
||||||
|
decodeFurther = true
|
||||||
|
default:
|
||||||
|
switch {
|
||||||
|
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
|
||||||
|
if d.h.SignedInteger {
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = d.DecodeInt(64)
|
||||||
|
} else {
|
||||||
|
n.v = valueTypeUint
|
||||||
|
n.u = d.DecodeUint(64)
|
||||||
|
}
|
||||||
|
case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = d.DecodeInt(64)
|
||||||
|
case d.bd >= cborBaseBytes && d.bd < cborBaseString:
|
||||||
|
n.v = valueTypeBytes
|
||||||
|
n.l = d.DecodeBytes(nil, false)
|
||||||
|
case d.bd >= cborBaseString && d.bd < cborBaseArray:
|
||||||
|
n.v = valueTypeString
|
||||||
|
n.s = d.DecodeString()
|
||||||
|
case d.bd >= cborBaseArray && d.bd < cborBaseMap:
|
||||||
|
n.v = valueTypeArray
|
||||||
|
decodeFurther = true
|
||||||
|
case d.bd >= cborBaseMap && d.bd < cborBaseTag:
|
||||||
|
n.v = valueTypeMap
|
||||||
|
decodeFurther = true
|
||||||
|
case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
|
||||||
|
n.v = valueTypeExt
|
||||||
|
n.u = d.decUint()
|
||||||
|
n.l = nil
|
||||||
|
// d.bdRead = false
|
||||||
|
// d.d.decode(&re.Value) // handled by decode itself.
|
||||||
|
// decodeFurther = true
|
||||||
|
default:
|
||||||
|
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !decodeFurther {
|
||||||
|
d.bdRead = false
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// -------------------------
|
||||||
|
|
||||||
|
// CborHandle is a Handle for the CBOR encoding format,
|
||||||
|
// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
|
||||||
|
//
|
||||||
|
// CBOR is comprehensively supported, including support for:
|
||||||
|
// - indefinite-length arrays/maps/bytes/strings
|
||||||
|
// - (extension) tags in range 0..0xffff (0 .. 65535)
|
||||||
|
// - half, single and double-precision floats
|
||||||
|
// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
|
||||||
|
// - nil, true, false, ...
|
||||||
|
// - arrays and maps, bytes and text strings
|
||||||
|
//
|
||||||
|
// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
|
||||||
|
// Users can implement them as needed (using SetExt), including spec-documented ones:
|
||||||
|
// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
|
||||||
|
//
|
||||||
|
// To encode with indefinite lengths (streaming), users will use
|
||||||
|
// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants.
|
||||||
|
//
|
||||||
|
// For example, to encode "one-byte" as an indefinite length string:
|
||||||
|
// var buf bytes.Buffer
|
||||||
|
// e := NewEncoder(&buf, new(CborHandle))
|
||||||
|
// buf.WriteByte(CborStreamString)
|
||||||
|
// e.MustEncode("one-")
|
||||||
|
// e.MustEncode("byte")
|
||||||
|
// buf.WriteByte(CborStreamBreak)
|
||||||
|
// encodedBytes := buf.Bytes()
|
||||||
|
// var vv interface{}
|
||||||
|
// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv)
|
||||||
|
// // Now, vv contains the same string "one-byte"
|
||||||
|
//
|
||||||
|
type CborHandle struct {
|
||||||
|
binaryEncodingType
|
||||||
|
noElemSeparators
|
||||||
|
BasicHandle
|
||||||
|
|
||||||
|
// IndefiniteLength=true, means that we encode using indefinitelength
|
||||||
|
IndefiniteLength bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
|
||||||
|
return h.SetExt(rt, tag, &setExtWrapper{i: ext})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
|
||||||
|
return &cborEncDriver{e: e, w: e.w, h: h}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
|
||||||
|
return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *cborEncDriver) reset() {
|
||||||
|
e.w = e.e.w
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cborDecDriver) reset() {
|
||||||
|
d.r, d.br = d.d.r, d.d.bytes
|
||||||
|
d.bd, d.bdRead = 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ decDriver = (*cborDecDriver)(nil)
|
||||||
|
var _ encDriver = (*cborEncDriver)(nil)
|
2520
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/decode.go
generated
vendored
Normal file
2520
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/decode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1414
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/encode.go
generated
vendored
Normal file
1414
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/encode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
33034
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.generated.go
generated
vendored
Normal file
33034
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
35
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.not.go
generated
vendored
Normal file
35
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/fast-path.not.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
// +build notfastpath
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "reflect"
|
||||||
|
|
||||||
|
const fastpathEnabled = false
|
||||||
|
|
||||||
|
// The generated fast-path code is very large, and adds a few seconds to the build time.
|
||||||
|
// This causes test execution, execution of small tools which use codec, etc
|
||||||
|
// to take a long time.
|
||||||
|
//
|
||||||
|
// To mitigate, we now support the notfastpath tag.
|
||||||
|
// This tag disables fastpath during build, allowing for faster build, test execution,
|
||||||
|
// short-program runs, etc.
|
||||||
|
|
||||||
|
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
|
||||||
|
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
|
||||||
|
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
|
||||||
|
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
|
||||||
|
func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { return false }
|
||||||
|
|
||||||
|
type fastpathT struct{}
|
||||||
|
type fastpathE struct {
|
||||||
|
rtid uintptr
|
||||||
|
rt reflect.Type
|
||||||
|
encfn func(*Encoder, *codecFnInfo, reflect.Value)
|
||||||
|
decfn func(*Decoder, *codecFnInfo, reflect.Value)
|
||||||
|
}
|
||||||
|
type fastpathA [0]fastpathE
|
||||||
|
|
||||||
|
func (x fastpathA) index(rtid uintptr) int { return -1 }
|
||||||
|
|
||||||
|
var fastpathAV fastpathA
|
||||||
|
var fastpathTV fastpathT
|
250
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
generated
vendored
Normal file
250
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
generated
vendored
Normal file
@ -0,0 +1,250 @@
|
|||||||
|
/* // +build ignore */
|
||||||
|
|
||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// ************************************************************
|
||||||
|
// DO NOT EDIT.
|
||||||
|
// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
|
||||||
|
// ************************************************************
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GenVersion is the current version of codecgen.
|
||||||
|
const GenVersion = 8
|
||||||
|
|
||||||
|
// This file is used to generate helper code for codecgen.
|
||||||
|
// The values here i.e. genHelper(En|De)coder are not to be used directly by
|
||||||
|
// library users. They WILL change continuously and without notice.
|
||||||
|
//
|
||||||
|
// To help enforce this, we create an unexported type with exported members.
|
||||||
|
// The only way to get the type is via the one exported type that we control (somewhat).
|
||||||
|
//
|
||||||
|
// When static codecs are created for types, they will use this value
|
||||||
|
// to perform encoding or decoding of primitives or known slice or map types.
|
||||||
|
|
||||||
|
// GenHelperEncoder is exported so that it can be used externally by codecgen.
|
||||||
|
//
|
||||||
|
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
|
||||||
|
func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
|
||||||
|
return genHelperEncoder{e: e}, e.e
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenHelperDecoder is exported so that it can be used externally by codecgen.
|
||||||
|
//
|
||||||
|
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
|
||||||
|
func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
|
||||||
|
return genHelperDecoder{d: d}, d.d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
|
||||||
|
func BasicHandleDoNotUse(h Handle) *BasicHandle {
|
||||||
|
return h.getBasicHandle()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
type genHelperEncoder struct {
|
||||||
|
e *Encoder
|
||||||
|
F fastpathT
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
type genHelperDecoder struct {
|
||||||
|
d *Decoder
|
||||||
|
F fastpathT
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
|
||||||
|
return f.e.h
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) EncBinary() bool {
|
||||||
|
return f.e.cf.be // f.e.hh.isBinaryEncoding()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) EncFallback(iv interface{}) {
|
||||||
|
// println(">>>>>>>>> EncFallback")
|
||||||
|
// f.e.encodeI(iv, false, false)
|
||||||
|
f.e.encodeValue(reflect.ValueOf(iv), nil, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
|
||||||
|
bs, fnerr := iv.MarshalText()
|
||||||
|
f.e.marshal(bs, fnerr, false, c_UTF8)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
|
||||||
|
bs, fnerr := iv.MarshalJSON()
|
||||||
|
f.e.marshal(bs, fnerr, true, c_UTF8)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
|
||||||
|
bs, fnerr := iv.MarshalBinary()
|
||||||
|
f.e.marshal(bs, fnerr, false, c_RAW)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) EncRaw(iv Raw) {
|
||||||
|
f.e.rawBytes(iv)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
|
||||||
|
if _, ok := f.e.hh.(*BincHandle); ok {
|
||||||
|
return timeTypId
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) IsJSONHandle() bool {
|
||||||
|
return f.e.cf.js
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) HasExtensions() bool {
|
||||||
|
return len(f.e.h.extHandle) != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
|
||||||
|
rt := reflect.TypeOf(v)
|
||||||
|
if rt.Kind() == reflect.Ptr {
|
||||||
|
rt = rt.Elem()
|
||||||
|
}
|
||||||
|
rtid := rt2id(rt)
|
||||||
|
if xfFn := f.e.h.getExt(rtid); xfFn != nil {
|
||||||
|
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------- DECODER FOLLOWS -----------------
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
|
||||||
|
return f.d.h
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecBinary() bool {
|
||||||
|
return f.d.be // f.d.hh.isBinaryEncoding()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecSwallow() {
|
||||||
|
f.d.swallow()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecScratchBuffer() []byte {
|
||||||
|
return f.d.b[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
|
||||||
|
// println(">>>>>>>>> DecFallback")
|
||||||
|
rv := reflect.ValueOf(iv)
|
||||||
|
if chkPtr {
|
||||||
|
rv = f.d.ensureDecodeable(rv)
|
||||||
|
}
|
||||||
|
f.d.decodeValue(rv, nil, false, false)
|
||||||
|
// f.d.decodeValueFallback(rv)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
|
||||||
|
return f.d.decSliceHelperStart()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
|
||||||
|
f.d.structFieldNotFound(index, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
|
||||||
|
f.d.arrayCannotExpand(sliceLen, streamLen)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
|
||||||
|
fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes())
|
||||||
|
if fnerr != nil {
|
||||||
|
panic(fnerr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
|
||||||
|
// bs := f.dd.DecodeStringAsBytes()
|
||||||
|
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
|
||||||
|
fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
|
||||||
|
if fnerr != nil {
|
||||||
|
panic(fnerr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
|
||||||
|
fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true))
|
||||||
|
if fnerr != nil {
|
||||||
|
panic(fnerr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecRaw() []byte {
|
||||||
|
return f.d.rawBytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
|
||||||
|
if _, ok := f.d.hh.(*BincHandle); ok {
|
||||||
|
return timeTypId
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) IsJSONHandle() bool {
|
||||||
|
return f.d.js
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) HasExtensions() bool {
|
||||||
|
return len(f.d.h.extHandle) != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
|
||||||
|
rt := reflect.TypeOf(v).Elem()
|
||||||
|
rtid := rt2id(rt)
|
||||||
|
if xfFn := f.d.h.getExt(rtid); xfFn != nil {
|
||||||
|
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
|
||||||
|
return decInferLen(clen, maxlen, unit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||||
|
func (f genHelperDecoder) StringView(v []byte) string {
|
||||||
|
return stringView(v)
|
||||||
|
}
|
132
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.generated.go
generated
vendored
Normal file
132
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.generated.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
|
||||||
|
|
||||||
|
const genDecMapTmpl = `
|
||||||
|
{{var "v"}} := *{{ .Varname }}
|
||||||
|
{{var "l"}} := r.ReadMapStart()
|
||||||
|
{{var "bh"}} := z.DecBasicHandle()
|
||||||
|
if {{var "v"}} == nil {
|
||||||
|
{{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
|
||||||
|
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
|
||||||
|
*{{ .Varname }} = {{var "v"}}
|
||||||
|
}
|
||||||
|
var {{var "mk"}} {{ .KTyp }}
|
||||||
|
var {{var "mv"}} {{ .Typ }}
|
||||||
|
var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
|
||||||
|
if {{var "bh"}}.MapValueReset {
|
||||||
|
{{if decElemKindPtr}}{{var "mg"}} = true
|
||||||
|
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
|
||||||
|
{{else if not decElemKindImmutable}}{{var "mg"}} = true
|
||||||
|
{{end}} }
|
||||||
|
if {{var "l"}} != 0 {
|
||||||
|
{{var "hl"}} := {{var "l"}} > 0
|
||||||
|
for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
|
||||||
|
r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}}
|
||||||
|
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
|
||||||
|
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
|
||||||
|
{{var "mk"}} = string({{var "bv"}})
|
||||||
|
}{{ end }}{{if decElemKindPtr}}
|
||||||
|
{{var "ms"}} = true{{end}}
|
||||||
|
if {{var "mg"}} {
|
||||||
|
{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
|
||||||
|
if {{var "mok"}} {
|
||||||
|
{{var "ms"}} = false
|
||||||
|
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
|
||||||
|
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
|
||||||
|
r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}}
|
||||||
|
{{var "mdn"}} = false
|
||||||
|
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }}
|
||||||
|
if {{var "mdn"}} {
|
||||||
|
if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} }
|
||||||
|
} else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
|
||||||
|
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} // else len==0: TODO: Should we clear map entries?
|
||||||
|
r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}
|
||||||
|
`
|
||||||
|
|
||||||
|
const genDecListTmpl = `
|
||||||
|
{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
|
||||||
|
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
|
||||||
|
var {{var "c"}} bool {{/* // changed */}}
|
||||||
|
_ = {{var "c"}}{{end}}
|
||||||
|
if {{var "l"}} == 0 {
|
||||||
|
{{if isSlice }}if {{var "v"}} == nil {
|
||||||
|
{{var "v"}} = []{{ .Typ }}{}
|
||||||
|
{{var "c"}} = true
|
||||||
|
} else if len({{var "v"}}) != 0 {
|
||||||
|
{{var "v"}} = {{var "v"}}[:0]
|
||||||
|
{{var "c"}} = true
|
||||||
|
} {{end}} {{if isChan }}if {{var "v"}} == nil {
|
||||||
|
{{var "v"}} = make({{ .CTyp }}, 0)
|
||||||
|
{{var "c"}} = true
|
||||||
|
} {{end}}
|
||||||
|
} else {
|
||||||
|
{{var "hl"}} := {{var "l"}} > 0
|
||||||
|
var {{var "rl"}} int; _ = {{var "rl"}}
|
||||||
|
{{if isSlice }} if {{var "hl"}} {
|
||||||
|
if {{var "l"}} > cap({{var "v"}}) {
|
||||||
|
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
|
||||||
|
if {{var "rl"}} <= cap({{var "v"}}) {
|
||||||
|
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
|
||||||
|
} else {
|
||||||
|
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
|
||||||
|
}
|
||||||
|
{{var "c"}} = true
|
||||||
|
} else if {{var "l"}} != len({{var "v"}}) {
|
||||||
|
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
|
||||||
|
{{var "c"}} = true
|
||||||
|
}
|
||||||
|
} {{end}}
|
||||||
|
var {{var "j"}} int
|
||||||
|
// var {{var "dn"}} bool
|
||||||
|
for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
|
||||||
|
{{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 {
|
||||||
|
if {{var "hl"}} {
|
||||||
|
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
|
||||||
|
} else {
|
||||||
|
{{var "rl"}} = 8
|
||||||
|
}
|
||||||
|
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
|
||||||
|
{{var "c"}} = true
|
||||||
|
}{{end}}
|
||||||
|
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||||
|
// {{var "dn"}} = r.TryDecodeAsNil()
|
||||||
|
{{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }}
|
||||||
|
{{ decLineVar $x }}
|
||||||
|
{{var "v"}} <- {{ $x }}
|
||||||
|
{{else}}
|
||||||
|
// if indefinite, etc, then expand the slice if necessary
|
||||||
|
var {{var "db"}} bool
|
||||||
|
if {{var "j"}} >= len({{var "v"}}) {
|
||||||
|
{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true
|
||||||
|
{{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
|
||||||
|
{{end}}
|
||||||
|
}
|
||||||
|
if {{var "db"}} {
|
||||||
|
z.DecSwallow()
|
||||||
|
} else {
|
||||||
|
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
|
||||||
|
}
|
||||||
|
{{end}}
|
||||||
|
}
|
||||||
|
{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
|
||||||
|
{{var "v"}} = {{var "v"}}[:{{var "j"}}]
|
||||||
|
{{var "c"}} = true
|
||||||
|
} else if {{var "j"}} == 0 && {{var "v"}} == nil {
|
||||||
|
{{var "v"}} = make([]{{ .Typ }}, 0)
|
||||||
|
{{var "c"}} = true
|
||||||
|
} {{end}}
|
||||||
|
}
|
||||||
|
{{var "h"}}.End()
|
||||||
|
{{if not isArray }}if {{var "c"}} {
|
||||||
|
*{{ .Varname }} = {{var "v"}}
|
||||||
|
}{{end}}
|
||||||
|
|
||||||
|
`
|
||||||
|
|
2014
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.go
generated
vendored
Normal file
2014
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/gen.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
14
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go
generated
vendored
Normal file
14
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.5
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "reflect"
|
||||||
|
|
||||||
|
const reflectArrayOfSupported = true
|
||||||
|
|
||||||
|
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
|
||||||
|
return reflect.ArrayOf(count, elem)
|
||||||
|
}
|
14
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go
generated
vendored
Normal file
14
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.5
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "reflect"
|
||||||
|
|
||||||
|
const reflectArrayOfSupported = false
|
||||||
|
|
||||||
|
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
|
||||||
|
panic("codec: reflect.ArrayOf unsupported in this go version")
|
||||||
|
}
|
15
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go
generated
vendored
Normal file
15
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.9
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "reflect"
|
||||||
|
|
||||||
|
func makeMapReflect(t reflect.Type, size int) reflect.Value {
|
||||||
|
if size < 0 {
|
||||||
|
return reflect.MakeMapWithSize(t, 4)
|
||||||
|
}
|
||||||
|
return reflect.MakeMapWithSize(t, size)
|
||||||
|
}
|
12
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go
generated
vendored
Normal file
12
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.9
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "reflect"
|
||||||
|
|
||||||
|
func makeMapReflect(t reflect.Type, size int) reflect.Value {
|
||||||
|
return reflect.MakeMap(t)
|
||||||
|
}
|
17
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go
generated
vendored
Normal file
17
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.4
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
// This codec package will only work for go1.4 and above.
|
||||||
|
// This is for the following reasons:
|
||||||
|
// - go 1.4 was released in 2014
|
||||||
|
// - go runtime is written fully in go
|
||||||
|
// - interface only holds pointers
|
||||||
|
// - reflect.Value is stabilized as 3 words
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
panic("codec: go 1.3 and below are not supported")
|
||||||
|
}
|
10
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go
generated
vendored
Normal file
10
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.5,!go1.6
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
|
10
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go
generated
vendored
Normal file
10
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.6,!go1.7
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"
|
8
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go
generated
vendored
Normal file
8
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.7
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
const genCheckVendor = true
|
8
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go
generated
vendored
Normal file
8
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.5
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
var genCheckVendor = false
|
1944
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper.go
generated
vendored
Normal file
1944
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
221
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_internal.go
generated
vendored
Normal file
221
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_internal.go
generated
vendored
Normal file
@ -0,0 +1,221 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
// All non-std package dependencies live in this file,
|
||||||
|
// so porting to different environment is easy (just update functions).
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
func panicValToErr(panicVal interface{}, err *error) {
|
||||||
|
if panicVal == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// case nil
|
||||||
|
switch xerr := panicVal.(type) {
|
||||||
|
case error:
|
||||||
|
*err = xerr
|
||||||
|
case string:
|
||||||
|
*err = errors.New(xerr)
|
||||||
|
default:
|
||||||
|
*err = fmt.Errorf("%v", panicVal)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Invalid:
|
||||||
|
return true
|
||||||
|
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||||
|
return v.Len() == 0
|
||||||
|
case reflect.Bool:
|
||||||
|
return !v.Bool()
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
return v.Int() == 0
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
return v.Uint() == 0
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return v.Float() == 0
|
||||||
|
case reflect.Interface, reflect.Ptr:
|
||||||
|
if deref {
|
||||||
|
if v.IsNil() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return hIsEmptyValue(v.Elem(), deref, checkStruct)
|
||||||
|
} else {
|
||||||
|
return v.IsNil()
|
||||||
|
}
|
||||||
|
case reflect.Struct:
|
||||||
|
if !checkStruct {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// return true if all fields are empty. else return false.
|
||||||
|
// we cannot use equality check, because some fields may be maps/slices/etc
|
||||||
|
// and consequently the structs are not comparable.
|
||||||
|
// return v.Interface() == reflect.Zero(v.Type()).Interface()
|
||||||
|
for i, n := 0, v.NumField(); i < n; i++ {
|
||||||
|
if !hIsEmptyValue(v.Field(i), deref, checkStruct) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
|
||||||
|
return hIsEmptyValue(v, deref, checkStruct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func pruneSignExt(v []byte, pos bool) (n int) {
|
||||||
|
if len(v) < 2 {
|
||||||
|
} else if pos && v[0] == 0 {
|
||||||
|
for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
|
||||||
|
}
|
||||||
|
} else if !pos && v[0] == 0xff {
|
||||||
|
for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
|
||||||
|
if typ == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rt := typ
|
||||||
|
// The type might be a pointer and we need to keep
|
||||||
|
// dereferencing to the base type until we find an implementation.
|
||||||
|
for {
|
||||||
|
if rt.Implements(iTyp) {
|
||||||
|
return true, indir
|
||||||
|
}
|
||||||
|
if p := rt; p.Kind() == reflect.Ptr {
|
||||||
|
indir++
|
||||||
|
if indir >= math.MaxInt8 { // insane number of indirections
|
||||||
|
return false, 0
|
||||||
|
}
|
||||||
|
rt = p.Elem()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
|
||||||
|
if typ.Kind() != reflect.Ptr {
|
||||||
|
// Not a pointer, but does the pointer work?
|
||||||
|
if reflect.PtrTo(typ).Implements(iTyp) {
|
||||||
|
return true, -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// validate that this function is correct ...
|
||||||
|
// culled from OGRE (Object-Oriented Graphics Rendering Engine)
|
||||||
|
// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
|
||||||
|
func halfFloatToFloatBits(yy uint16) (d uint32) {
|
||||||
|
y := uint32(yy)
|
||||||
|
s := (y >> 15) & 0x01
|
||||||
|
e := (y >> 10) & 0x1f
|
||||||
|
m := y & 0x03ff
|
||||||
|
|
||||||
|
if e == 0 {
|
||||||
|
if m == 0 { // plu or minus 0
|
||||||
|
return s << 31
|
||||||
|
} else { // Denormalized number -- renormalize it
|
||||||
|
for (m & 0x00000400) == 0 {
|
||||||
|
m <<= 1
|
||||||
|
e -= 1
|
||||||
|
}
|
||||||
|
e += 1
|
||||||
|
const zz uint32 = 0x0400
|
||||||
|
m &= ^zz
|
||||||
|
}
|
||||||
|
} else if e == 31 {
|
||||||
|
if m == 0 { // Inf
|
||||||
|
return (s << 31) | 0x7f800000
|
||||||
|
} else { // NaN
|
||||||
|
return (s << 31) | 0x7f800000 | (m << 13)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
e = e + (127 - 15)
|
||||||
|
m = m << 13
|
||||||
|
return (s << 31) | (e << 23) | m
|
||||||
|
}
|
||||||
|
|
||||||
|
// GrowCap will return a new capacity for a slice, given the following:
|
||||||
|
// - oldCap: current capacity
|
||||||
|
// - unit: in-memory size of an element
|
||||||
|
// - num: number of elements to add
|
||||||
|
func growCap(oldCap, unit, num int) (newCap int) {
|
||||||
|
// appendslice logic (if cap < 1024, *2, else *1.25):
|
||||||
|
// leads to many copy calls, especially when copying bytes.
|
||||||
|
// bytes.Buffer model (2*cap + n): much better for bytes.
|
||||||
|
// smarter way is to take the byte-size of the appended element(type) into account
|
||||||
|
|
||||||
|
// maintain 3 thresholds:
|
||||||
|
// t1: if cap <= t1, newcap = 2x
|
||||||
|
// t2: if cap <= t2, newcap = 1.75x
|
||||||
|
// t3: if cap <= t3, newcap = 1.5x
|
||||||
|
// else newcap = 1.25x
|
||||||
|
//
|
||||||
|
// t1, t2, t3 >= 1024 always.
|
||||||
|
// i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same)
|
||||||
|
//
|
||||||
|
// With this, appending for bytes increase by:
|
||||||
|
// 100% up to 4K
|
||||||
|
// 75% up to 8K
|
||||||
|
// 50% up to 16K
|
||||||
|
// 25% beyond that
|
||||||
|
|
||||||
|
// unit can be 0 e.g. for struct{}{}; handle that appropriately
|
||||||
|
var t1, t2, t3 int // thresholds
|
||||||
|
if unit <= 1 {
|
||||||
|
t1, t2, t3 = 4*1024, 8*1024, 16*1024
|
||||||
|
} else if unit < 16 {
|
||||||
|
t3 = 16 / unit * 1024
|
||||||
|
t1 = t3 * 1 / 4
|
||||||
|
t2 = t3 * 2 / 4
|
||||||
|
} else {
|
||||||
|
t1, t2, t3 = 1024, 1024, 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
var x int // temporary variable
|
||||||
|
|
||||||
|
// x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
|
||||||
|
if oldCap <= t1 { // [0,t1]
|
||||||
|
x = 8
|
||||||
|
} else if oldCap > t3 { // (t3,infinity]
|
||||||
|
x = 5
|
||||||
|
} else if oldCap <= t2 { // (t1,t2]
|
||||||
|
x = 7
|
||||||
|
} else { // (t2,t3]
|
||||||
|
x = 6
|
||||||
|
}
|
||||||
|
newCap = x * oldCap / 4
|
||||||
|
|
||||||
|
if num > 0 {
|
||||||
|
newCap += num
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensure newCap is a multiple of 64 (if it is > 64) or 16.
|
||||||
|
if newCap > 64 {
|
||||||
|
if x = newCap % 64; x != 0 {
|
||||||
|
x = newCap / 64
|
||||||
|
newCap = 64 * (x + 1)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if x = newCap % 16; x != 0 {
|
||||||
|
x = newCap / 16
|
||||||
|
newCap = 16 * (x + 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
156
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
generated
vendored
Normal file
156
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
|
|||||||
|
// +build !go1.7 safe appengine
|
||||||
|
|
||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// stringView returns a view of the []byte as a string.
|
||||||
|
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
|
||||||
|
// In regular safe mode, it is an allocation and copy.
|
||||||
|
//
|
||||||
|
// Usage: Always maintain a reference to v while result of this call is in use,
|
||||||
|
// and call keepAlive4BytesView(v) at point where done with view.
|
||||||
|
func stringView(v []byte) string {
|
||||||
|
return string(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// bytesView returns a view of the string as a []byte.
|
||||||
|
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
|
||||||
|
// In regular safe mode, it is an allocation and copy.
|
||||||
|
//
|
||||||
|
// Usage: Always maintain a reference to v while result of this call is in use,
|
||||||
|
// and call keepAlive4BytesView(v) at point where done with view.
|
||||||
|
func bytesView(v string) []byte {
|
||||||
|
return []byte(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func definitelyNil(v interface{}) bool {
|
||||||
|
return false
|
||||||
|
// rv := reflect.ValueOf(v)
|
||||||
|
// switch rv.Kind() {
|
||||||
|
// case reflect.Invalid:
|
||||||
|
// return true
|
||||||
|
// case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func:
|
||||||
|
// return rv.IsNil()
|
||||||
|
// default:
|
||||||
|
// return false
|
||||||
|
// }
|
||||||
|
}
|
||||||
|
|
||||||
|
// // keepAlive4BytesView maintains a reference to the input parameter for bytesView.
|
||||||
|
// //
|
||||||
|
// // Usage: call this at point where done with the bytes view.
|
||||||
|
// func keepAlive4BytesView(v string) {}
|
||||||
|
|
||||||
|
// // keepAlive4BytesView maintains a reference to the input parameter for stringView.
|
||||||
|
// //
|
||||||
|
// // Usage: call this at point where done with the string view.
|
||||||
|
// func keepAlive4StringView(v []byte) {}
|
||||||
|
|
||||||
|
func rv2i(rv reflect.Value) interface{} {
|
||||||
|
return rv.Interface()
|
||||||
|
}
|
||||||
|
|
||||||
|
func rt2id(rt reflect.Type) uintptr {
|
||||||
|
return reflect.ValueOf(rt).Pointer()
|
||||||
|
}
|
||||||
|
|
||||||
|
func rv2rtid(rv reflect.Value) uintptr {
|
||||||
|
return reflect.ValueOf(rv.Type()).Pointer()
|
||||||
|
}
|
||||||
|
|
||||||
|
// --------------------------
|
||||||
|
// type ptrToRvMap struct{}
|
||||||
|
|
||||||
|
// func (_ *ptrToRvMap) init() {}
|
||||||
|
// func (_ *ptrToRvMap) get(i interface{}) reflect.Value {
|
||||||
|
// return reflect.ValueOf(i).Elem()
|
||||||
|
// }
|
||||||
|
|
||||||
|
// --------------------------
|
||||||
|
type atomicTypeInfoSlice struct {
|
||||||
|
v atomic.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *atomicTypeInfoSlice) load() *[]rtid2ti {
|
||||||
|
i := x.v.Load()
|
||||||
|
if i == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return i.(*[]rtid2ti)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) {
|
||||||
|
x.v.Store(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// --------------------------
|
||||||
|
func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetBytes(d.rawBytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetString(d.d.DecodeString())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetBool(d.d.DecodeBool())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetFloat(d.d.DecodeFloat(true))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetFloat(d.d.DecodeFloat(false))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetInt(d.d.DecodeInt(intBitsize))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetInt(d.d.DecodeInt(8))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetInt(d.d.DecodeInt(16))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetInt(d.d.DecodeInt(32))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetInt(d.d.DecodeInt(64))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetUint(d.d.DecodeUint(uintBitsize))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetUint(d.d.DecodeUint(uintBitsize))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetUint(d.d.DecodeUint(8))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetUint(d.d.DecodeUint(16))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetUint(d.d.DecodeUint(32))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
rv.SetUint(d.d.DecodeUint(64))
|
||||||
|
}
|
418
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_unsafe.go
generated
vendored
Normal file
418
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/helper_unsafe.go
generated
vendored
Normal file
@ -0,0 +1,418 @@
|
|||||||
|
// +build !safe
|
||||||
|
// +build !appengine
|
||||||
|
// +build go1.7
|
||||||
|
|
||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"sync/atomic"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This file has unsafe variants of some helper methods.
|
||||||
|
// NOTE: See helper_not_unsafe.go for the usage information.
|
||||||
|
|
||||||
|
// var zeroRTv [4]uintptr
|
||||||
|
|
||||||
|
const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go
|
||||||
|
|
||||||
|
type unsafeString struct {
|
||||||
|
Data uintptr
|
||||||
|
Len int
|
||||||
|
}
|
||||||
|
|
||||||
|
type unsafeSlice struct {
|
||||||
|
Data uintptr
|
||||||
|
Len int
|
||||||
|
Cap int
|
||||||
|
}
|
||||||
|
|
||||||
|
type unsafeIntf struct {
|
||||||
|
typ unsafe.Pointer
|
||||||
|
word unsafe.Pointer
|
||||||
|
}
|
||||||
|
|
||||||
|
type unsafeReflectValue struct {
|
||||||
|
typ unsafe.Pointer
|
||||||
|
ptr unsafe.Pointer
|
||||||
|
flag uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringView(v []byte) string {
|
||||||
|
if len(v) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
bx := (*unsafeSlice)(unsafe.Pointer(&v))
|
||||||
|
sx := unsafeString{bx.Data, bx.Len}
|
||||||
|
return *(*string)(unsafe.Pointer(&sx))
|
||||||
|
}
|
||||||
|
|
||||||
|
func bytesView(v string) []byte {
|
||||||
|
if len(v) == 0 {
|
||||||
|
return zeroByteSlice
|
||||||
|
}
|
||||||
|
|
||||||
|
sx := (*unsafeString)(unsafe.Pointer(&v))
|
||||||
|
bx := unsafeSlice{sx.Data, sx.Len, sx.Len}
|
||||||
|
return *(*[]byte)(unsafe.Pointer(&bx))
|
||||||
|
}
|
||||||
|
|
||||||
|
func definitelyNil(v interface{}) bool {
|
||||||
|
return (*unsafeIntf)(unsafe.Pointer(&v)).word == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// func keepAlive4BytesView(v string) {
|
||||||
|
// runtime.KeepAlive(v)
|
||||||
|
// }
|
||||||
|
|
||||||
|
// func keepAlive4StringView(v []byte) {
|
||||||
|
// runtime.KeepAlive(v)
|
||||||
|
// }
|
||||||
|
|
||||||
|
// TODO: consider a more generally-known optimization for reflect.Value ==> Interface
|
||||||
|
//
|
||||||
|
// Currently, we use this fragile method that taps into implememtation details from
|
||||||
|
// the source go stdlib reflect/value.go,
|
||||||
|
// and trims the implementation.
|
||||||
|
func rv2i(rv reflect.Value) interface{} {
|
||||||
|
if false {
|
||||||
|
return rv.Interface()
|
||||||
|
}
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
// references that are single-words (map, ptr) may be double-referenced as flagIndir
|
||||||
|
kk := urv.flag & (1<<5 - 1)
|
||||||
|
if (kk == uintptr(reflect.Map) || kk == uintptr(reflect.Ptr)) && urv.flag&unsafeFlagIndir != 0 {
|
||||||
|
return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
|
||||||
|
}
|
||||||
|
return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func rt2id(rt reflect.Type) uintptr {
|
||||||
|
return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
|
||||||
|
}
|
||||||
|
|
||||||
|
func rv2rtid(rv reflect.Value) uintptr {
|
||||||
|
return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
// func rv0t(rt reflect.Type) reflect.Value {
|
||||||
|
// ut := (*unsafeIntf)(unsafe.Pointer(&rt))
|
||||||
|
// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr
|
||||||
|
// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())}
|
||||||
|
// return *(*reflect.Value)(unsafe.Pointer(&uv})
|
||||||
|
// }
|
||||||
|
|
||||||
|
// --------------------------
|
||||||
|
type atomicTypeInfoSlice struct {
|
||||||
|
v unsafe.Pointer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *atomicTypeInfoSlice) load() *[]rtid2ti {
|
||||||
|
return (*[]rtid2ti)(atomic.LoadPointer(&x.v))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) {
|
||||||
|
atomic.StorePointer(&x.v, unsafe.Pointer(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
// --------------------------
|
||||||
|
func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
// if urv.flag&unsafeFlagIndir != 0 {
|
||||||
|
// urv.ptr = *(*unsafe.Pointer)(urv.ptr)
|
||||||
|
// }
|
||||||
|
*(*[]byte)(urv.ptr) = d.rawBytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*string)(urv.ptr) = d.d.DecodeString()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*bool)(urv.ptr) = d.d.DecodeBool()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*float32)(urv.ptr) = float32(d.d.DecodeFloat(true))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*float64)(urv.ptr) = d.d.DecodeFloat(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*int)(urv.ptr) = int(d.d.DecodeInt(intBitsize))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*int8)(urv.ptr) = int8(d.d.DecodeInt(8))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*int16)(urv.ptr) = int16(d.d.DecodeInt(16))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*int32)(urv.ptr) = int32(d.d.DecodeInt(32))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*int64)(urv.ptr) = d.d.DecodeInt(64)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*uint)(urv.ptr) = uint(d.d.DecodeUint(uintBitsize))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*uintptr)(urv.ptr) = uintptr(d.d.DecodeUint(uintBitsize))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*uint8)(urv.ptr) = uint8(d.d.DecodeUint(8))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*uint16)(urv.ptr) = uint16(d.d.DecodeUint(16))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*uint32)(urv.ptr) = uint32(d.d.DecodeUint(32))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
|
||||||
|
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
*(*uint64)(urv.ptr) = d.d.DecodeUint(64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ------------
|
||||||
|
|
||||||
|
// func rt2id(rt reflect.Type) uintptr {
|
||||||
|
// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
|
||||||
|
// // var i interface{} = rt
|
||||||
|
// // // ui := (*unsafeIntf)(unsafe.Pointer(&i))
|
||||||
|
// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word
|
||||||
|
// }
|
||||||
|
|
||||||
|
// func rv2i(rv reflect.Value) interface{} {
|
||||||
|
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
// // non-reference type: already indir
|
||||||
|
// // reference type: depend on flagIndir property ('cos maybe was double-referenced)
|
||||||
|
// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 )
|
||||||
|
// // rvk := reflect.Kind(urv.flag & (1<<5 - 1))
|
||||||
|
// // if (rvk == reflect.Chan ||
|
||||||
|
// // rvk == reflect.Func ||
|
||||||
|
// // rvk == reflect.Interface ||
|
||||||
|
// // rvk == reflect.Map ||
|
||||||
|
// // rvk == reflect.Ptr ||
|
||||||
|
// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 {
|
||||||
|
// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
|
||||||
|
// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
|
||||||
|
// // }
|
||||||
|
// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 {
|
||||||
|
// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
|
||||||
|
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
|
||||||
|
// }
|
||||||
|
// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type())
|
||||||
|
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
|
||||||
|
// }
|
||||||
|
|
||||||
|
// const (
|
||||||
|
// unsafeRvFlagKindMask = 1<<5 - 1
|
||||||
|
// unsafeRvKindDirectIface = 1 << 5
|
||||||
|
// unsafeRvFlagIndir = 1 << 7
|
||||||
|
// unsafeRvFlagAddr = 1 << 8
|
||||||
|
// unsafeRvFlagMethod = 1 << 9
|
||||||
|
|
||||||
|
// _USE_RV_INTERFACE bool = false
|
||||||
|
// _UNSAFE_RV_DEBUG = true
|
||||||
|
// )
|
||||||
|
|
||||||
|
// type unsafeRtype struct {
|
||||||
|
// _ [2]uintptr
|
||||||
|
// _ uint32
|
||||||
|
// _ uint8
|
||||||
|
// _ uint8
|
||||||
|
// _ uint8
|
||||||
|
// kind uint8
|
||||||
|
// _ [2]uintptr
|
||||||
|
// _ int32
|
||||||
|
// }
|
||||||
|
|
||||||
|
// func _rv2i(rv reflect.Value) interface{} {
|
||||||
|
// // Note: From use,
|
||||||
|
// // - it's never an interface
|
||||||
|
// // - the only calls here are for ifaceIndir types.
|
||||||
|
// // (though that conditional is wrong)
|
||||||
|
// // To know for sure, we need the value of t.kind (which is not exposed).
|
||||||
|
// //
|
||||||
|
// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct)
|
||||||
|
// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string
|
||||||
|
// // - Type Direct, Value indirect: ==> map???
|
||||||
|
// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map
|
||||||
|
// //
|
||||||
|
// // TRANSLATES TO:
|
||||||
|
// // if typeIndirect { } else if valueIndirect { } else { }
|
||||||
|
// //
|
||||||
|
// // Since we don't deal with funcs, then "flagNethod" is unset, and can be ignored.
|
||||||
|
|
||||||
|
// if _USE_RV_INTERFACE {
|
||||||
|
// return rv.Interface()
|
||||||
|
// }
|
||||||
|
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
|
||||||
|
// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
|
||||||
|
// // println("***** IS flag method or interface: delegating to rv.Interface()")
|
||||||
|
// // return rv.Interface()
|
||||||
|
// // }
|
||||||
|
|
||||||
|
// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
|
||||||
|
// // println("***** IS Interface: delegate to rv.Interface")
|
||||||
|
// // return rv.Interface()
|
||||||
|
// // }
|
||||||
|
// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 {
|
||||||
|
// // if urv.flag&unsafeRvFlagAddr == 0 {
|
||||||
|
// // println("***** IS ifaceIndir typ")
|
||||||
|
// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ}
|
||||||
|
// // // return *(*interface{})(unsafe.Pointer(&ui))
|
||||||
|
// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
|
||||||
|
// // }
|
||||||
|
// // } else if urv.flag&unsafeRvFlagIndir != 0 {
|
||||||
|
// // println("***** IS flagindir")
|
||||||
|
// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
|
||||||
|
// // } else {
|
||||||
|
// // println("***** NOT flagindir")
|
||||||
|
// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
|
||||||
|
// // }
|
||||||
|
// // println("***** default: delegate to rv.Interface")
|
||||||
|
|
||||||
|
// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ))
|
||||||
|
// if _UNSAFE_RV_DEBUG {
|
||||||
|
// fmt.Printf(">>>> start: %v: ", rv.Type())
|
||||||
|
// fmt.Printf("%v - %v\n", *urv, *urt)
|
||||||
|
// }
|
||||||
|
// if urt.kind&unsafeRvKindDirectIface == 0 {
|
||||||
|
// if _UNSAFE_RV_DEBUG {
|
||||||
|
// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type())
|
||||||
|
// }
|
||||||
|
// // println("***** IS ifaceIndir typ")
|
||||||
|
// // if true || urv.flag&unsafeRvFlagAddr == 0 {
|
||||||
|
// // // println(" ***** IS NOT addr")
|
||||||
|
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
|
||||||
|
// // }
|
||||||
|
// } else if urv.flag&unsafeRvFlagIndir != 0 {
|
||||||
|
// if _UNSAFE_RV_DEBUG {
|
||||||
|
// fmt.Printf("**** +flagIndir type: %v\n", rv.Type())
|
||||||
|
// }
|
||||||
|
// // println("***** IS flagindir")
|
||||||
|
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
|
||||||
|
// } else {
|
||||||
|
// if _UNSAFE_RV_DEBUG {
|
||||||
|
// fmt.Printf("**** -flagIndir type: %v\n", rv.Type())
|
||||||
|
// }
|
||||||
|
// // println("***** NOT flagindir")
|
||||||
|
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
|
||||||
|
// }
|
||||||
|
// // println("***** default: delegating to rv.Interface()")
|
||||||
|
// // return rv.Interface()
|
||||||
|
// }
|
||||||
|
|
||||||
|
// var staticM0 = make(map[string]uint64)
|
||||||
|
// var staticI0 = (int32)(-5)
|
||||||
|
|
||||||
|
// func staticRv2iTest() {
|
||||||
|
// i0 := (int32)(-5)
|
||||||
|
// m0 := make(map[string]uint16)
|
||||||
|
// m0["1"] = 1
|
||||||
|
// for _, i := range []interface{}{
|
||||||
|
// (int)(7),
|
||||||
|
// (uint)(8),
|
||||||
|
// (int16)(-9),
|
||||||
|
// (uint16)(19),
|
||||||
|
// (uintptr)(77),
|
||||||
|
// (bool)(true),
|
||||||
|
// float32(-32.7),
|
||||||
|
// float64(64.9),
|
||||||
|
// complex(float32(19), 5),
|
||||||
|
// complex(float64(-32), 7),
|
||||||
|
// [4]uint64{1, 2, 3, 4},
|
||||||
|
// (chan<- int)(nil), // chan,
|
||||||
|
// rv2i, // func
|
||||||
|
// io.Writer(ioutil.Discard),
|
||||||
|
// make(map[string]uint),
|
||||||
|
// (map[string]uint)(nil),
|
||||||
|
// staticM0,
|
||||||
|
// m0,
|
||||||
|
// &m0,
|
||||||
|
// i0,
|
||||||
|
// &i0,
|
||||||
|
// &staticI0,
|
||||||
|
// &staticM0,
|
||||||
|
// []uint32{6, 7, 8},
|
||||||
|
// "abc",
|
||||||
|
// Raw{},
|
||||||
|
// RawExt{},
|
||||||
|
// &Raw{},
|
||||||
|
// &RawExt{},
|
||||||
|
// unsafe.Pointer(&i0),
|
||||||
|
// } {
|
||||||
|
// i2 := rv2i(reflect.ValueOf(i))
|
||||||
|
// eq := reflect.DeepEqual(i, i2)
|
||||||
|
// fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq)
|
||||||
|
// }
|
||||||
|
// // os.Exit(0)
|
||||||
|
// }
|
||||||
|
|
||||||
|
// func init() {
|
||||||
|
// staticRv2iTest()
|
||||||
|
// }
|
||||||
|
|
||||||
|
// func rv2i(rv reflect.Value) interface{} {
|
||||||
|
// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() {
|
||||||
|
// return rv.Interface()
|
||||||
|
// }
|
||||||
|
// // var i interface{}
|
||||||
|
// // ui := (*unsafeIntf)(unsafe.Pointer(&i))
|
||||||
|
// var ui unsafeIntf
|
||||||
|
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
|
||||||
|
// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr))
|
||||||
|
// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 {
|
||||||
|
// if urv.flag&unsafeRvFlagAddr != 0 {
|
||||||
|
// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()")
|
||||||
|
// return rv.Interface()
|
||||||
|
// }
|
||||||
|
// println("****** indirect type/kind")
|
||||||
|
// ui.word = urv.ptr
|
||||||
|
// } else if urv.flag&unsafeRvFlagIndir != 0 {
|
||||||
|
// println("****** unsafe rv flag indir")
|
||||||
|
// ui.word = *(*unsafe.Pointer)(urv.ptr)
|
||||||
|
// } else {
|
||||||
|
// println("****** default: assign prt to word directly")
|
||||||
|
// ui.word = urv.ptr
|
||||||
|
// }
|
||||||
|
// // ui.word = urv.ptr
|
||||||
|
// ui.typ = urv.typ
|
||||||
|
// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word)
|
||||||
|
// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word))
|
||||||
|
// return *(*interface{})(unsafe.Pointer(&ui))
|
||||||
|
// // return i
|
||||||
|
// }
|
1172
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/json.go
generated
vendored
Normal file
1172
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/json.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
899
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/msgpack.go
generated
vendored
Normal file
899
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/msgpack.go
generated
vendored
Normal file
@ -0,0 +1,899 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
/*
|
||||||
|
MSGPACK
|
||||||
|
|
||||||
|
Msgpack-c implementation powers the c, c++, python, ruby, etc libraries.
|
||||||
|
We need to maintain compatibility with it and how it encodes integer values
|
||||||
|
without caring about the type.
|
||||||
|
|
||||||
|
For compatibility with behaviour of msgpack-c reference implementation:
|
||||||
|
- Go intX (>0) and uintX
|
||||||
|
IS ENCODED AS
|
||||||
|
msgpack +ve fixnum, unsigned
|
||||||
|
- Go intX (<0)
|
||||||
|
IS ENCODED AS
|
||||||
|
msgpack -ve fixnum, signed
|
||||||
|
|
||||||
|
*/
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"net/rpc"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
mpPosFixNumMin byte = 0x00
|
||||||
|
mpPosFixNumMax = 0x7f
|
||||||
|
mpFixMapMin = 0x80
|
||||||
|
mpFixMapMax = 0x8f
|
||||||
|
mpFixArrayMin = 0x90
|
||||||
|
mpFixArrayMax = 0x9f
|
||||||
|
mpFixStrMin = 0xa0
|
||||||
|
mpFixStrMax = 0xbf
|
||||||
|
mpNil = 0xc0
|
||||||
|
_ = 0xc1
|
||||||
|
mpFalse = 0xc2
|
||||||
|
mpTrue = 0xc3
|
||||||
|
mpFloat = 0xca
|
||||||
|
mpDouble = 0xcb
|
||||||
|
mpUint8 = 0xcc
|
||||||
|
mpUint16 = 0xcd
|
||||||
|
mpUint32 = 0xce
|
||||||
|
mpUint64 = 0xcf
|
||||||
|
mpInt8 = 0xd0
|
||||||
|
mpInt16 = 0xd1
|
||||||
|
mpInt32 = 0xd2
|
||||||
|
mpInt64 = 0xd3
|
||||||
|
|
||||||
|
// extensions below
|
||||||
|
mpBin8 = 0xc4
|
||||||
|
mpBin16 = 0xc5
|
||||||
|
mpBin32 = 0xc6
|
||||||
|
mpExt8 = 0xc7
|
||||||
|
mpExt16 = 0xc8
|
||||||
|
mpExt32 = 0xc9
|
||||||
|
mpFixExt1 = 0xd4
|
||||||
|
mpFixExt2 = 0xd5
|
||||||
|
mpFixExt4 = 0xd6
|
||||||
|
mpFixExt8 = 0xd7
|
||||||
|
mpFixExt16 = 0xd8
|
||||||
|
|
||||||
|
mpStr8 = 0xd9 // new
|
||||||
|
mpStr16 = 0xda
|
||||||
|
mpStr32 = 0xdb
|
||||||
|
|
||||||
|
mpArray16 = 0xdc
|
||||||
|
mpArray32 = 0xdd
|
||||||
|
|
||||||
|
mpMap16 = 0xde
|
||||||
|
mpMap32 = 0xdf
|
||||||
|
|
||||||
|
mpNegFixNumMin = 0xe0
|
||||||
|
mpNegFixNumMax = 0xff
|
||||||
|
)
|
||||||
|
|
||||||
|
// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
|
||||||
|
// that the backend RPC service takes multiple arguments, which have been arranged
|
||||||
|
// in sequence in the slice.
|
||||||
|
//
|
||||||
|
// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
|
||||||
|
// array of 1 element).
|
||||||
|
type MsgpackSpecRpcMultiArgs []interface{}
|
||||||
|
|
||||||
|
// A MsgpackContainer type specifies the different types of msgpackContainers.
|
||||||
|
type msgpackContainerType struct {
|
||||||
|
fixCutoff int
|
||||||
|
bFixMin, b8, b16, b32 byte
|
||||||
|
hasFixMin, has8, has8Always bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false}
|
||||||
|
msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true}
|
||||||
|
msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false}
|
||||||
|
msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false}
|
||||||
|
)
|
||||||
|
|
||||||
|
//---------------------------------------------
|
||||||
|
|
||||||
|
type msgpackEncDriver struct {
|
||||||
|
noBuiltInTypes
|
||||||
|
encDriverNoopContainerWriter
|
||||||
|
// encNoSeparator
|
||||||
|
e *Encoder
|
||||||
|
w encWriter
|
||||||
|
h *MsgpackHandle
|
||||||
|
x [8]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeNil() {
|
||||||
|
e.w.writen1(mpNil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeInt(i int64) {
|
||||||
|
if i >= 0 {
|
||||||
|
e.EncodeUint(uint64(i))
|
||||||
|
} else if i >= -32 {
|
||||||
|
e.w.writen1(byte(i))
|
||||||
|
} else if i >= math.MinInt8 {
|
||||||
|
e.w.writen2(mpInt8, byte(i))
|
||||||
|
} else if i >= math.MinInt16 {
|
||||||
|
e.w.writen1(mpInt16)
|
||||||
|
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
|
||||||
|
} else if i >= math.MinInt32 {
|
||||||
|
e.w.writen1(mpInt32)
|
||||||
|
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
|
||||||
|
} else {
|
||||||
|
e.w.writen1(mpInt64)
|
||||||
|
bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeUint(i uint64) {
|
||||||
|
if i <= math.MaxInt8 {
|
||||||
|
e.w.writen1(byte(i))
|
||||||
|
} else if i <= math.MaxUint8 {
|
||||||
|
e.w.writen2(mpUint8, byte(i))
|
||||||
|
} else if i <= math.MaxUint16 {
|
||||||
|
e.w.writen1(mpUint16)
|
||||||
|
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
|
||||||
|
} else if i <= math.MaxUint32 {
|
||||||
|
e.w.writen1(mpUint32)
|
||||||
|
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
|
||||||
|
} else {
|
||||||
|
e.w.writen1(mpUint64)
|
||||||
|
bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeBool(b bool) {
|
||||||
|
if b {
|
||||||
|
e.w.writen1(mpTrue)
|
||||||
|
} else {
|
||||||
|
e.w.writen1(mpFalse)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeFloat32(f float32) {
|
||||||
|
e.w.writen1(mpFloat)
|
||||||
|
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeFloat64(f float64) {
|
||||||
|
e.w.writen1(mpDouble)
|
||||||
|
bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) {
|
||||||
|
bs := ext.WriteExt(v)
|
||||||
|
if bs == nil {
|
||||||
|
e.EncodeNil()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if e.h.WriteExt {
|
||||||
|
e.encodeExtPreamble(uint8(xtag), len(bs))
|
||||||
|
e.w.writeb(bs)
|
||||||
|
} else {
|
||||||
|
e.EncodeStringBytes(c_RAW, bs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
|
||||||
|
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
|
||||||
|
e.w.writeb(re.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
|
||||||
|
if l == 1 {
|
||||||
|
e.w.writen2(mpFixExt1, xtag)
|
||||||
|
} else if l == 2 {
|
||||||
|
e.w.writen2(mpFixExt2, xtag)
|
||||||
|
} else if l == 4 {
|
||||||
|
e.w.writen2(mpFixExt4, xtag)
|
||||||
|
} else if l == 8 {
|
||||||
|
e.w.writen2(mpFixExt8, xtag)
|
||||||
|
} else if l == 16 {
|
||||||
|
e.w.writen2(mpFixExt16, xtag)
|
||||||
|
} else if l < 256 {
|
||||||
|
e.w.writen2(mpExt8, byte(l))
|
||||||
|
e.w.writen1(xtag)
|
||||||
|
} else if l < 65536 {
|
||||||
|
e.w.writen1(mpExt16)
|
||||||
|
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
|
||||||
|
e.w.writen1(xtag)
|
||||||
|
} else {
|
||||||
|
e.w.writen1(mpExt32)
|
||||||
|
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
|
||||||
|
e.w.writen1(xtag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) WriteArrayStart(length int) {
|
||||||
|
e.writeContainerLen(msgpackContainerList, length)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) WriteMapStart(length int) {
|
||||||
|
e.writeContainerLen(msgpackContainerMap, length)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
|
||||||
|
slen := len(s)
|
||||||
|
if c == c_RAW && e.h.WriteExt {
|
||||||
|
e.writeContainerLen(msgpackContainerBin, slen)
|
||||||
|
} else {
|
||||||
|
e.writeContainerLen(msgpackContainerStr, slen)
|
||||||
|
}
|
||||||
|
if slen > 0 {
|
||||||
|
e.w.writestr(s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeSymbol(v string) {
|
||||||
|
e.EncodeString(c_UTF8, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) {
|
||||||
|
slen := len(bs)
|
||||||
|
if c == c_RAW && e.h.WriteExt {
|
||||||
|
e.writeContainerLen(msgpackContainerBin, slen)
|
||||||
|
} else {
|
||||||
|
e.writeContainerLen(msgpackContainerStr, slen)
|
||||||
|
}
|
||||||
|
if slen > 0 {
|
||||||
|
e.w.writeb(bs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
|
||||||
|
if ct.hasFixMin && l < ct.fixCutoff {
|
||||||
|
e.w.writen1(ct.bFixMin | byte(l))
|
||||||
|
} else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) {
|
||||||
|
e.w.writen2(ct.b8, uint8(l))
|
||||||
|
} else if l < 65536 {
|
||||||
|
e.w.writen1(ct.b16)
|
||||||
|
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
|
||||||
|
} else {
|
||||||
|
e.w.writen1(ct.b32)
|
||||||
|
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//---------------------------------------------
|
||||||
|
|
||||||
|
type msgpackDecDriver struct {
|
||||||
|
d *Decoder
|
||||||
|
r decReader // *Decoder decReader decReaderT
|
||||||
|
h *MsgpackHandle
|
||||||
|
b [scratchByteArrayLen]byte
|
||||||
|
bd byte
|
||||||
|
bdRead bool
|
||||||
|
br bool // bytes reader
|
||||||
|
noBuiltInTypes
|
||||||
|
// noStreamingCodec
|
||||||
|
// decNoSeparator
|
||||||
|
decDriverNoopContainerReader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note: This returns either a primitive (int, bool, etc) for non-containers,
|
||||||
|
// or a containerType, or a specific type denoting nil or extension.
|
||||||
|
// It is called when a nil interface{} is passed, leaving it up to the DecDriver
|
||||||
|
// to introspect the stream and decide how best to decode.
|
||||||
|
// It deciphers the value by looking at the stream first.
|
||||||
|
func (d *msgpackDecDriver) DecodeNaked() {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
bd := d.bd
|
||||||
|
n := d.d.n
|
||||||
|
var decodeFurther bool
|
||||||
|
|
||||||
|
switch bd {
|
||||||
|
case mpNil:
|
||||||
|
n.v = valueTypeNil
|
||||||
|
d.bdRead = false
|
||||||
|
case mpFalse:
|
||||||
|
n.v = valueTypeBool
|
||||||
|
n.b = false
|
||||||
|
case mpTrue:
|
||||||
|
n.v = valueTypeBool
|
||||||
|
n.b = true
|
||||||
|
|
||||||
|
case mpFloat:
|
||||||
|
n.v = valueTypeFloat
|
||||||
|
n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
|
||||||
|
case mpDouble:
|
||||||
|
n.v = valueTypeFloat
|
||||||
|
n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
|
||||||
|
|
||||||
|
case mpUint8:
|
||||||
|
n.v = valueTypeUint
|
||||||
|
n.u = uint64(d.r.readn1())
|
||||||
|
case mpUint16:
|
||||||
|
n.v = valueTypeUint
|
||||||
|
n.u = uint64(bigen.Uint16(d.r.readx(2)))
|
||||||
|
case mpUint32:
|
||||||
|
n.v = valueTypeUint
|
||||||
|
n.u = uint64(bigen.Uint32(d.r.readx(4)))
|
||||||
|
case mpUint64:
|
||||||
|
n.v = valueTypeUint
|
||||||
|
n.u = uint64(bigen.Uint64(d.r.readx(8)))
|
||||||
|
|
||||||
|
case mpInt8:
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = int64(int8(d.r.readn1()))
|
||||||
|
case mpInt16:
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = int64(int16(bigen.Uint16(d.r.readx(2))))
|
||||||
|
case mpInt32:
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = int64(int32(bigen.Uint32(d.r.readx(4))))
|
||||||
|
case mpInt64:
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = int64(int64(bigen.Uint64(d.r.readx(8))))
|
||||||
|
|
||||||
|
default:
|
||||||
|
switch {
|
||||||
|
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
|
||||||
|
// positive fixnum (always signed)
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = int64(int8(bd))
|
||||||
|
case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
|
||||||
|
// negative fixnum
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = int64(int8(bd))
|
||||||
|
case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
|
||||||
|
if d.h.RawToString {
|
||||||
|
n.v = valueTypeString
|
||||||
|
n.s = d.DecodeString()
|
||||||
|
} else {
|
||||||
|
n.v = valueTypeBytes
|
||||||
|
n.l = d.DecodeBytes(nil, false)
|
||||||
|
}
|
||||||
|
case bd == mpBin8, bd == mpBin16, bd == mpBin32:
|
||||||
|
n.v = valueTypeBytes
|
||||||
|
n.l = d.DecodeBytes(nil, false)
|
||||||
|
case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
|
||||||
|
n.v = valueTypeArray
|
||||||
|
decodeFurther = true
|
||||||
|
case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
|
||||||
|
n.v = valueTypeMap
|
||||||
|
decodeFurther = true
|
||||||
|
case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
|
||||||
|
n.v = valueTypeExt
|
||||||
|
clen := d.readExtLen()
|
||||||
|
n.u = uint64(d.r.readn1())
|
||||||
|
n.l = d.r.readx(clen)
|
||||||
|
default:
|
||||||
|
d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !decodeFurther {
|
||||||
|
d.bdRead = false
|
||||||
|
}
|
||||||
|
if n.v == valueTypeUint && d.h.SignedInteger {
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = int64(n.u)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// int can be decoded from msgpack type: intXXX or uintXXX
|
||||||
|
func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
switch d.bd {
|
||||||
|
case mpUint8:
|
||||||
|
i = int64(uint64(d.r.readn1()))
|
||||||
|
case mpUint16:
|
||||||
|
i = int64(uint64(bigen.Uint16(d.r.readx(2))))
|
||||||
|
case mpUint32:
|
||||||
|
i = int64(uint64(bigen.Uint32(d.r.readx(4))))
|
||||||
|
case mpUint64:
|
||||||
|
i = int64(bigen.Uint64(d.r.readx(8)))
|
||||||
|
case mpInt8:
|
||||||
|
i = int64(int8(d.r.readn1()))
|
||||||
|
case mpInt16:
|
||||||
|
i = int64(int16(bigen.Uint16(d.r.readx(2))))
|
||||||
|
case mpInt32:
|
||||||
|
i = int64(int32(bigen.Uint32(d.r.readx(4))))
|
||||||
|
case mpInt64:
|
||||||
|
i = int64(bigen.Uint64(d.r.readx(8)))
|
||||||
|
default:
|
||||||
|
switch {
|
||||||
|
case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
|
||||||
|
i = int64(int8(d.bd))
|
||||||
|
case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
|
||||||
|
i = int64(int8(d.bd))
|
||||||
|
default:
|
||||||
|
d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// check overflow (logic adapted from std pkg reflect/value.go OverflowUint()
|
||||||
|
if bitsize > 0 {
|
||||||
|
if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
|
||||||
|
d.d.errorf("Overflow int value: %v", i)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// uint can be decoded from msgpack type: intXXX or uintXXX
|
||||||
|
func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
switch d.bd {
|
||||||
|
case mpUint8:
|
||||||
|
ui = uint64(d.r.readn1())
|
||||||
|
case mpUint16:
|
||||||
|
ui = uint64(bigen.Uint16(d.r.readx(2)))
|
||||||
|
case mpUint32:
|
||||||
|
ui = uint64(bigen.Uint32(d.r.readx(4)))
|
||||||
|
case mpUint64:
|
||||||
|
ui = bigen.Uint64(d.r.readx(8))
|
||||||
|
case mpInt8:
|
||||||
|
if i := int64(int8(d.r.readn1())); i >= 0 {
|
||||||
|
ui = uint64(i)
|
||||||
|
} else {
|
||||||
|
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case mpInt16:
|
||||||
|
if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 {
|
||||||
|
ui = uint64(i)
|
||||||
|
} else {
|
||||||
|
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case mpInt32:
|
||||||
|
if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 {
|
||||||
|
ui = uint64(i)
|
||||||
|
} else {
|
||||||
|
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case mpInt64:
|
||||||
|
if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 {
|
||||||
|
ui = uint64(i)
|
||||||
|
} else {
|
||||||
|
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
switch {
|
||||||
|
case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
|
||||||
|
ui = uint64(d.bd)
|
||||||
|
case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
|
||||||
|
d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd))
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// check overflow (logic adapted from std pkg reflect/value.go OverflowUint()
|
||||||
|
if bitsize > 0 {
|
||||||
|
if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
|
||||||
|
d.d.errorf("Overflow uint value: %v", ui)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// float can either be decoded from msgpack type: float, double or intX
|
||||||
|
func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == mpFloat {
|
||||||
|
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
|
||||||
|
} else if d.bd == mpDouble {
|
||||||
|
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
|
||||||
|
} else {
|
||||||
|
f = float64(d.DecodeInt(0))
|
||||||
|
}
|
||||||
|
if chkOverflow32 && chkOvf.Float32(f) {
|
||||||
|
d.d.errorf("msgpack: float32 overflow: %v", f)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// bool can be decoded from bool, fixnum 0 or 1.
|
||||||
|
func (d *msgpackDecDriver) DecodeBool() (b bool) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == mpFalse || d.bd == 0 {
|
||||||
|
// b = false
|
||||||
|
} else if d.bd == mpTrue || d.bd == 1 {
|
||||||
|
b = true
|
||||||
|
} else {
|
||||||
|
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeBytes reads the next stream element as a byte slice.
// It accepts bin, str/fixstr, or array containers (an array must contain
// only nil or uint8 elements, which are appended one byte at a time).
// When zerocopy is set and the underlying reader is a bytes reader (d.br),
// the returned slice aliases the input buffer; otherwise the scratch
// buffer d.b may be used when bs is empty.
func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
	if !d.bdRead {
		d.readNextBd()
	}

	// DecodeBytes could be from: bin str fixstr fixarray array ...
	var clen int
	vt := d.ContainerType()
	switch vt {
	case valueTypeBytes:
		// valueTypeBytes may be a mpBin or an mpStr container
		if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
			clen = d.readContainerLen(msgpackContainerBin)
		} else {
			clen = d.readContainerLen(msgpackContainerStr)
		}
	case valueTypeString:
		clen = d.readContainerLen(msgpackContainerStr)
	case valueTypeArray:
		clen = d.readContainerLen(msgpackContainerList)
		// ensure everything after is one byte each
		for i := 0; i < clen; i++ {
			d.readNextBd()
			if d.bd == mpNil {
				// nil element maps to a zero byte
				bs = append(bs, 0)
			} else if d.bd == mpUint8 {
				bs = append(bs, d.r.readn1())
			} else {
				d.d.errorf("cannot read non-byte into a byte array")
				return
			}
		}
		d.bdRead = false
		return bs
	default:
		d.d.errorf("invalid container type: expecting bin|str|array")
		return
	}

	// these are (bin|str)(8|16|32)
	// println("DecodeBytes: clen: ", clen)
	d.bdRead = false
	// bytes may be nil, so handle it. if nil, clen=-1.
	if clen < 0 {
		return nil
	}
	if zerocopy {
		if d.br {
			// bytes reader: return a view directly into the input buffer.
			return d.r.readx(clen)
		} else if len(bs) == 0 {
			// reuse the driver's scratch buffer as the destination.
			bs = d.b[:]
		}
	}
	return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
|
||||||
|
|
||||||
|
// DecodeString decodes the next element as bytes (using the scratch buffer
// d.b where possible) and converts the result to a string.
func (d *msgpackDecDriver) DecodeString() (s string) {
	return string(d.DecodeBytes(d.b[:], true))
}

// DecodeStringAsBytes is like DecodeString but returns the raw bytes,
// avoiding the string conversion copy.
func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) {
	return d.DecodeBytes(d.b[:], true)
}

// readNextBd reads the next descriptor byte into d.bd and marks it cached.
func (d *msgpackDecDriver) readNextBd() {
	d.bd = d.r.readn1()
	d.bdRead = true
}

// uncacheRead pushes a cached descriptor byte back onto the reader so the
// stream can be re-read from that position.
func (d *msgpackDecDriver) uncacheRead() {
	if d.bdRead {
		d.r.unreadn1()
		d.bdRead = false
	}
}
|
||||||
|
|
||||||
|
func (d *msgpackDecDriver) ContainerType() (vt valueType) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
bd := d.bd
|
||||||
|
if bd == mpNil {
|
||||||
|
return valueTypeNil
|
||||||
|
} else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
|
||||||
|
(!d.h.RawToString &&
|
||||||
|
(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) {
|
||||||
|
return valueTypeBytes
|
||||||
|
} else if d.h.RawToString &&
|
||||||
|
(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) {
|
||||||
|
return valueTypeString
|
||||||
|
} else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
|
||||||
|
return valueTypeArray
|
||||||
|
} else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
|
||||||
|
return valueTypeMap
|
||||||
|
} else {
|
||||||
|
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
|
||||||
|
}
|
||||||
|
return valueTypeUnset
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == mpNil {
|
||||||
|
d.bdRead = false
|
||||||
|
v = true
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
|
||||||
|
bd := d.bd
|
||||||
|
if bd == mpNil {
|
||||||
|
clen = -1 // to represent nil
|
||||||
|
} else if bd == ct.b8 {
|
||||||
|
clen = int(d.r.readn1())
|
||||||
|
} else if bd == ct.b16 {
|
||||||
|
clen = int(bigen.Uint16(d.r.readx(2)))
|
||||||
|
} else if bd == ct.b32 {
|
||||||
|
clen = int(bigen.Uint32(d.r.readx(4)))
|
||||||
|
} else if (ct.bFixMin & bd) == ct.bFixMin {
|
||||||
|
clen = int(ct.bFixMin ^ bd)
|
||||||
|
} else {
|
||||||
|
d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadMapStart consumes the map descriptor and returns the entry count
// (-1 when the stream element is nil).
func (d *msgpackDecDriver) ReadMapStart() int {
	if !d.bdRead {
		d.readNextBd()
	}
	return d.readContainerLen(msgpackContainerMap)
}

// ReadArrayStart consumes the array descriptor and returns the element
// count (-1 when the stream element is nil).
func (d *msgpackDecDriver) ReadArrayStart() int {
	if !d.bdRead {
		d.readNextBd()
	}
	return d.readContainerLen(msgpackContainerList)
}
|
||||||
|
|
||||||
|
// readExtLen returns the payload length for the ext value whose descriptor
// byte is already cached in d.bd, or -1 for nil. Fixext forms encode the
// length in the descriptor; mpExt8/16/32 read it from the stream. The tag
// byte is not consumed here (decodeExtV reads it afterwards).
func (d *msgpackDecDriver) readExtLen() (clen int) {
	switch d.bd {
	case mpNil:
		clen = -1 // to represent nil
	case mpFixExt1:
		clen = 1
	case mpFixExt2:
		clen = 2
	case mpFixExt4:
		clen = 4
	case mpFixExt8:
		clen = 8
	case mpFixExt16:
		clen = 16
	case mpExt8:
		clen = int(d.r.readn1())
	case mpExt16:
		clen = int(bigen.Uint16(d.r.readx(2)))
	case mpExt32:
		clen = int(bigen.Uint32(d.r.readx(4)))
	default:
		d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd)
		return
	}
	return
}
|
||||||
|
|
||||||
|
func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
|
||||||
|
if xtag > 0xff {
|
||||||
|
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
|
||||||
|
realxtag = uint64(realxtag1)
|
||||||
|
if ext == nil {
|
||||||
|
re := rv.(*RawExt)
|
||||||
|
re.Tag = realxtag
|
||||||
|
re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
|
||||||
|
} else {
|
||||||
|
ext.ReadExt(rv, xbs)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeExtV reads an extension value, returning its tag and payload bytes.
// bin and str containers are also accepted as payloads (xtag then stays 0).
// When verifyTag is set, a tag mismatch is reported via errorf.
func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
	if !d.bdRead {
		d.readNextBd()
	}
	xbd := d.bd
	if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 {
		// bin container: payload is the raw bytes; no tag in the stream.
		xbs = d.DecodeBytes(nil, true)
	} else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 ||
		(xbd >= mpFixStrMin && xbd <= mpFixStrMax) {
		xbs = d.DecodeStringAsBytes()
	} else {
		// ext container: length, then one tag byte, then the payload.
		clen := d.readExtLen()
		xtag = d.r.readn1()
		if verifyTag && xtag != tag {
			d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
			return
		}
		xbs = d.r.readx(clen)
	}
	d.bdRead = false
	return
}
|
||||||
|
|
||||||
|
//--------------------------------------------------
|
||||||
|
|
||||||
|
// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
type MsgpackHandle struct {
	BasicHandle

	// RawToString controls how raw bytes are decoded into a nil interface{}.
	RawToString bool

	// WriteExt flag supports encoding configured extensions with extension tags.
	// It also controls whether other elements of the new spec are encoded (ie Str8).
	//
	// With WriteExt=false, configured extensions are serialized as raw bytes
	// and Str8 is not encoded.
	//
	// A stream can still be decoded into a typed value, provided an appropriate value
	// is provided, but the type cannot be inferred from the stream. If no appropriate
	// type is provided (e.g. decoding into a nil interface{}), you get back
	// a []byte or string based on the setting of RawToString.
	WriteExt bool
	// embedded markers: msgpack is a binary format with no element separators.
	binaryEncodingType
	noElemSeparators
}
|
||||||
|
|
||||||
|
// SetBytesExt registers a BytesExt for rt under tag, wrapping it so the
// generic SetExt machinery can drive it.
func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
	return h.SetExt(rt, tag, &setExtWrapper{b: ext})
}

// newEncDriver returns a msgpack encode driver bound to e and its writer.
func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
	return &msgpackEncDriver{e: e, w: e.w, h: h}
}

// newDecDriver returns a msgpack decode driver bound to d and its reader.
func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
	return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes}
}
|
||||||
|
|
||||||
|
// reset re-binds the encode driver to its Encoder's (possibly new) writer.
func (e *msgpackEncDriver) reset() {
	e.w = e.e.w
}

// reset re-binds the decode driver to its Decoder's reader and clears the
// cached descriptor byte.
func (d *msgpackDecDriver) reset() {
	d.r, d.br = d.d.r, d.d.bytes
	d.bd, d.bdRead = 0, false
}
|
||||||
|
|
||||||
|
//--------------------------------------------------
|
||||||
|
|
||||||
|
// msgpackSpecRpcCodec implements the msgpack-rpc spec wire protocol on top
// of the shared rpcCodec plumbing.
type msgpackSpecRpcCodec struct {
	rpcCodec
}
|
||||||
|
|
||||||
|
// /////////////// Spec RPC Codec ///////////////////
|
||||||
|
func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
|
||||||
|
// WriteRequest can write to both a Go service, and other services that do
|
||||||
|
// not abide by the 1 argument rule of a Go service.
|
||||||
|
// We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
|
||||||
|
var bodyArr []interface{}
|
||||||
|
if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
|
||||||
|
bodyArr = ([]interface{})(m)
|
||||||
|
} else {
|
||||||
|
bodyArr = []interface{}{body}
|
||||||
|
}
|
||||||
|
r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
|
||||||
|
return c.write(r2, nil, false, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
|
||||||
|
var moe interface{}
|
||||||
|
if r.Error != "" {
|
||||||
|
moe = r.Error
|
||||||
|
}
|
||||||
|
if moe != nil && body != nil {
|
||||||
|
body = nil
|
||||||
|
}
|
||||||
|
r2 := []interface{}{1, uint32(r.Seq), moe, body}
|
||||||
|
return c.write(r2, nil, false, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadResponseHeader parses a msgpack-rpc response header (type byte 1),
// filling in Seq and Error.
func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
	return c.parseCustomHeader(1, &r.Seq, &r.Error)
}

// ReadRequestHeader parses a msgpack-rpc request header (type byte 0),
// filling in Seq and ServiceMethod.
func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
	return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
}

// ReadRequestBody decodes the request params array into body.
// A nil body reads and discards the params instead.
func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
	if body == nil { // read and discard
		return c.read(nil)
	}
	bodyArr := []interface{}{body}
	return c.read(&bodyArr)
}
|
||||||
|
|
||||||
|
// parseCustomHeader reads the leading elements of the fixed 4-item
// msgpack-rpc header array: [type, msgid, method-or-error, ...]. The 4th
// element (params/result) is deliberately left in the stream so the body
// can be decoded on its own later. expectTypeByte is 0 for requests and 1
// for responses.
func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {

	if c.isClosed() {
		return io.EOF
	}

	// We read the response header by hand
	// so that the body can be decoded on its own from the stream at a later time.

	const fia byte = 0x94 //four item array descriptor value
	// Not sure why the panic of EOF is swallowed above.
	// if bs1 := c.dec.r.readn1(); bs1 != fia {
	// 	err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
	// 	return
	// }
	var b byte
	// read the array descriptor straight from the buffered reader,
	// bypassing the decoder's own state.
	b, err = c.br.ReadByte()
	if err != nil {
		return
	}
	if b != fia {
		err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
		return
	}

	if err = c.read(&b); err != nil {
		return
	}
	if b != expectTypeByte {
		err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b)
		return
	}
	if err = c.read(msgid); err != nil {
		return
	}
	if err = c.read(methodOrError); err != nil {
		return
	}
	return
}
|
||||||
|
|
||||||
|
//--------------------------------------------------
|
||||||
|
|
||||||
|
// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol
// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
type msgpackSpecRpc struct{}

// MsgpackSpecRpc implements Rpc using the communication protocol defined in
// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
var MsgpackSpecRpc msgpackSpecRpc

// ServerCodec returns a msgpack-rpc spec server codec over conn using handle h.
func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
	return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}

// ClientCodec returns a msgpack-rpc spec client codec over conn using handle h.
func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
	return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}
|
||||||
|
|
||||||
|
// compile-time checks that the msgpack drivers satisfy the codec interfaces.
var _ decDriver = (*msgpackDecDriver)(nil)
var _ encDriver = (*msgpackEncDriver)(nil)
|
214
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/noop.go
generated
vendored
Normal file
214
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/noop.go
generated
vendored
Normal file
@ -0,0 +1,214 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NoopHandle returns a no-op handle. It basically does nothing.
|
||||||
|
// It is only useful for benchmarking, as it gives an idea of the
|
||||||
|
// overhead from the codec framework.
|
||||||
|
//
|
||||||
|
// LIBRARY USERS: *** DO NOT USE ***
|
||||||
|
func NoopHandle(slen int) *noopHandle {
|
||||||
|
h := noopHandle{}
|
||||||
|
h.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||||
|
h.B = make([][]byte, slen)
|
||||||
|
h.S = make([]string, slen)
|
||||||
|
for i := 0; i < len(h.S); i++ {
|
||||||
|
b := make([]byte, i+1)
|
||||||
|
for j := 0; j < len(b); j++ {
|
||||||
|
b[j] = 'a' + byte(i)
|
||||||
|
}
|
||||||
|
h.B[i] = b
|
||||||
|
h.S[i] = string(b)
|
||||||
|
}
|
||||||
|
return &h
|
||||||
|
}
|
||||||
|
|
||||||
|
// noopHandle does nothing.
// It is used to simulate the overhead of the codec framework.
type noopHandle struct {
	BasicHandle
	binaryEncodingType
	noopDrv // noopDrv is unexported here, so we can get a copy of it when needed.
}

// noopDrv is a combined enc/dec driver whose "decoded" values come from
// counters and a seeded RNG rather than any input stream.
type noopDrv struct {
	d    *Decoder
	e    *Encoder
	i    int       // monotonically increasing counter backing m()
	S    []string  // canned strings served by DecodeString
	B    [][]byte  // canned byte slices served by DecodeBytes
	mks  []bool    // stack. if map (true), else if array (false)
	mk   bool      // top of stack. what container are we on? map or array?
	ct   valueType // last response for IsContainerType.
	cb   int       // counter for ContainerType
	rand *rand.Rand
}
|
||||||
|
|
||||||
|
// r returns a pseudo-random int in [0, v).
func (h *noopDrv) r(v int) int { return h.rand.Intn(v) }

// m advances the deterministic counter and returns it modulo v.
func (h *noopDrv) m(v int) int { h.i++; return h.i % v }

// the noop driver acts as its own enc and dec driver.
func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h }
func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h }

func (h *noopDrv) reset()       {}
func (h *noopDrv) uncacheRead() {}
|
||||||
|
|
||||||
|
// --- encDriver
|
||||||
|
|
||||||
|
// stack functions (for map and array)
|
||||||
|
func (h *noopDrv) start(b bool) {
|
||||||
|
// println("start", len(h.mks)+1)
|
||||||
|
h.mks = append(h.mks, b)
|
||||||
|
h.mk = b
|
||||||
|
}
|
||||||
|
func (h *noopDrv) end() {
|
||||||
|
// println("end: ", len(h.mks)-1)
|
||||||
|
h.mks = h.mks[:len(h.mks)-1]
|
||||||
|
if len(h.mks) > 0 {
|
||||||
|
h.mk = h.mks[len(h.mks)-1]
|
||||||
|
} else {
|
||||||
|
h.mk = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode* methods are deliberate no-ops: nothing is written anywhere, so a
// benchmark against this driver measures pure framework overhead.
func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {}
func (h *noopDrv) EncodeNil()                              {}
func (h *noopDrv) EncodeInt(i int64)                       {}
func (h *noopDrv) EncodeUint(i uint64)                     {}
func (h *noopDrv) EncodeBool(b bool)                       {}
func (h *noopDrv) EncodeFloat32(f float32)                 {}
func (h *noopDrv) EncodeFloat64(f float64)                 {}
func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder)     {}

// container starts/ends only maintain the map/array stack.
func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) }
func (h *noopDrv) EncodeMapStart(length int)   { h.start(false) }
func (h *noopDrv) EncodeEnd()                  { h.end() }

func (h *noopDrv) EncodeString(c charEncoding, v string)      {}
func (h *noopDrv) EncodeSymbol(v string)                      {}
func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {}

func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {}

// ---- decDriver
// Decode* methods synthesize values from the counter m() so successive
// calls vary deterministically; no stream is consumed.
func (h *noopDrv) initReadNext()                              {}
func (h *noopDrv) CheckBreak() bool                           { return false }
func (h *noopDrv) IsBuiltinType(rt uintptr) bool              { return false }
func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{})    {}
func (h *noopDrv) DecodeInt(bitsize uint8) (i int64)          { return int64(h.m(15)) }
func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64)       { return uint64(h.m(35)) }
func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) }
func (h *noopDrv) DecodeBool() (b bool)                       { return h.m(2) == 0 }
func (h *noopDrv) DecodeString() (s string)                   { return h.S[h.m(8)] }
func (h *noopDrv) DecodeStringAsBytes() []byte                { return h.DecodeBytes(nil, true) }

func (h *noopDrv) DecodeBytes(bs []byte, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }
|
||||||
|
|
||||||
|
// ReadEnd pops the container stack.
func (h *noopDrv) ReadEnd() { h.end() }

// toggle map/slice; lengths come from the deterministic counter.
func (h *noopDrv) ReadMapStart() int   { h.start(true); return h.m(10) }
func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) }
|
||||||
|
|
||||||
|
func (h *noopDrv) ContainerType() (vt valueType) {
|
||||||
|
// return h.m(2) == 0
|
||||||
|
// handle kStruct, which will bomb is it calls this and doesn't get back a map or array.
|
||||||
|
// consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2
|
||||||
|
// for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs)
|
||||||
|
// however, every 10th time it is called, we just return something else.
|
||||||
|
var vals = [...]valueType{valueTypeArray, valueTypeMap}
|
||||||
|
// ------------ TAKE ------------
|
||||||
|
// if h.cb%2 == 0 {
|
||||||
|
// if h.ct == valueTypeMap || h.ct == valueTypeArray {
|
||||||
|
// } else {
|
||||||
|
// h.ct = vals[h.m(2)]
|
||||||
|
// }
|
||||||
|
// } else if h.cb%5 == 0 {
|
||||||
|
// h.ct = valueType(h.m(8))
|
||||||
|
// } else {
|
||||||
|
// h.ct = vals[h.m(2)]
|
||||||
|
// }
|
||||||
|
// ------------ TAKE ------------
|
||||||
|
// if h.cb%16 == 0 {
|
||||||
|
// h.ct = valueType(h.cb % 8)
|
||||||
|
// } else {
|
||||||
|
// h.ct = vals[h.cb%2]
|
||||||
|
// }
|
||||||
|
h.ct = vals[h.cb%2]
|
||||||
|
h.cb++
|
||||||
|
return h.ct
|
||||||
|
|
||||||
|
// if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes {
|
||||||
|
// return h.ct
|
||||||
|
// }
|
||||||
|
// return valueTypeUnset
|
||||||
|
// TODO: may need to tweak this so it works.
|
||||||
|
// if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
|
||||||
|
// h.cb = !h.cb
|
||||||
|
// h.ct = vt
|
||||||
|
// return h.cb
|
||||||
|
// }
|
||||||
|
// // go in a loop and check it.
|
||||||
|
// h.ct = vt
|
||||||
|
// h.cb = h.m(7) == 0
|
||||||
|
// return h.cb
|
||||||
|
}
|
||||||
|
func (h *noopDrv) TryDecodeAsNil() bool {
|
||||||
|
if h.mk {
|
||||||
|
return false
|
||||||
|
} else {
|
||||||
|
return h.m(8) == 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeNaked synthesizes a pseudo-random "naked" value into h.d.n.
// Map keys restrict sk to 1..7, excluding nil, bytes, array, map and ext.
func (h *noopDrv) DecodeNaked() {
	// use h.r (random) not h.m() because h.m() could cause the same value to be given.
	var sk int
	if h.mk {
		// if mapkey, do not support values of nil OR bytes, array, map or rawext
		sk = h.r(7) + 1
	} else {
		sk = h.r(12)
	}
	n := &h.d.n
	switch sk {
	case 0:
		n.v = valueTypeNil
	case 1:
		n.v, n.b = valueTypeBool, false
	case 2:
		n.v, n.b = valueTypeBool, true
	case 3:
		n.v, n.i = valueTypeInt, h.DecodeInt(64)
	case 4:
		n.v, n.u = valueTypeUint, h.DecodeUint(64)
	case 5:
		n.v, n.f = valueTypeFloat, h.DecodeFloat(true)
	case 6:
		n.v, n.f = valueTypeFloat, h.DecodeFloat(false)
	case 7:
		n.v, n.s = valueTypeString, h.DecodeString()
	case 8:
		n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))]
	case 9:
		n.v = valueTypeArray
	case 10:
		n.v = valueTypeMap
	default:
		n.v = valueTypeExt
		n.u = h.DecodeUint(64)
		n.l = h.B[h.m(len(h.B))]
	}
	h.ct = n.v
	return
}
|
187
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/rpc.go
generated
vendored
Normal file
187
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/rpc.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"net/rpc"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// // rpcEncodeTerminator allows a handler specify a []byte terminator to send after each Encode.
|
||||||
|
// //
|
||||||
|
// // Some codecs like json need to put a space after each encoded value, to serve as a
|
||||||
|
// // delimiter for things like numbers (else json codec will continue reading till EOF).
|
||||||
|
// type rpcEncodeTerminator interface {
|
||||||
|
// rpcEncodeTerminate() []byte
|
||||||
|
// }
|
||||||
|
|
||||||
|
// Rpc provides a rpc Server or Client Codec for rpc communication.
type Rpc interface {
	ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
	ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
}

// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
// used by the rpc connection. It accommodates use-cases where the connection
// should be used by rpc and non-rpc functions, e.g. streaming a file after
// sending an rpc response.
type RpcCodecBuffered interface {
	BufferedReader() *bufio.Reader
	BufferedWriter() *bufio.Writer
}
|
||||||
|
|
||||||
|
// -------------------------------------
|
||||||
|
|
||||||
|
// rpcCodec defines the struct members and common methods.
type rpcCodec struct {
	rwc io.ReadWriteCloser
	dec *Decoder
	enc *Encoder
	bw  *bufio.Writer
	br  *bufio.Reader
	mu  sync.Mutex // serializes writes; see goRpcCodec.WriteRequest
	h   Handle

	cls   bool         // true once Close has run
	clsmu sync.RWMutex // guards cls
}
|
||||||
|
|
||||||
|
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
|
||||||
|
bw := bufio.NewWriter(conn)
|
||||||
|
br := bufio.NewReader(conn)
|
||||||
|
|
||||||
|
// defensive: ensure that jsonH has TermWhitespace turned on.
|
||||||
|
if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace {
|
||||||
|
panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true"))
|
||||||
|
}
|
||||||
|
|
||||||
|
return rpcCodec{
|
||||||
|
rwc: conn,
|
||||||
|
bw: bw,
|
||||||
|
br: br,
|
||||||
|
enc: NewEncoder(bw, h),
|
||||||
|
dec: NewDecoder(br, h),
|
||||||
|
h: h,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BufferedReader exposes the codec's buffered reader (see RpcCodecBuffered).
func (c *rpcCodec) BufferedReader() *bufio.Reader {
	return c.br
}

// BufferedWriter exposes the codec's buffered writer (see RpcCodecBuffered).
func (c *rpcCodec) BufferedWriter() *bufio.Writer {
	return c.bw
}
|
||||||
|
|
||||||
|
func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
|
||||||
|
if c.isClosed() {
|
||||||
|
return io.EOF
|
||||||
|
}
|
||||||
|
if err = c.enc.Encode(obj1); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// t, tOk := c.h.(rpcEncodeTerminator)
|
||||||
|
// if tOk {
|
||||||
|
// c.bw.Write(t.rpcEncodeTerminate())
|
||||||
|
// }
|
||||||
|
if writeObj2 {
|
||||||
|
if err = c.enc.Encode(obj2); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// if tOk {
|
||||||
|
// c.bw.Write(t.rpcEncodeTerminate())
|
||||||
|
// }
|
||||||
|
}
|
||||||
|
if doFlush {
|
||||||
|
return c.bw.Flush()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *rpcCodec) read(obj interface{}) (err error) {
|
||||||
|
if c.isClosed() {
|
||||||
|
return io.EOF
|
||||||
|
}
|
||||||
|
//If nil is passed in, we should still attempt to read content to nowhere.
|
||||||
|
if obj == nil {
|
||||||
|
var obj2 interface{}
|
||||||
|
return c.dec.Decode(&obj2)
|
||||||
|
}
|
||||||
|
return c.dec.Decode(obj)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *rpcCodec) isClosed() bool {
|
||||||
|
c.clsmu.RLock()
|
||||||
|
x := c.cls
|
||||||
|
c.clsmu.RUnlock()
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *rpcCodec) Close() error {
|
||||||
|
if c.isClosed() {
|
||||||
|
return io.EOF
|
||||||
|
}
|
||||||
|
c.clsmu.Lock()
|
||||||
|
c.cls = true
|
||||||
|
c.clsmu.Unlock()
|
||||||
|
return c.rwc.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadResponseBody decodes the response body (draining it if body is nil).
func (c *rpcCodec) ReadResponseBody(body interface{}) error {
	return c.read(body)
}
|
||||||
|
|
||||||
|
// -------------------------------------
|
||||||
|
|
||||||
|
// goRpcCodec adapts rpcCodec to the net/rpc header/body wire sequence.
type goRpcCodec struct {
	rpcCodec
}
|
||||||
|
|
||||||
|
// WriteRequest encodes the request header and body back-to-back and flushes.
func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
	// Must protect for concurrent access as per API
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.write(r, body, true, true)
}

// WriteResponse encodes the response header and body back-to-back and
// flushes, serialized with the same mutex as WriteRequest.
func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.write(r, body, true, true)
}

// ReadResponseHeader decodes the next value into the response header.
func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
	return c.read(r)
}

// ReadRequestHeader decodes the next value into the request header.
func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
	return c.read(r)
}

// ReadRequestBody decodes the request body (draining it if body is nil).
func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
	return c.read(body)
}
|
||||||
|
|
||||||
|
// -------------------------------------
|
||||||
|
|
||||||
|
// goRpc is the implementation of Rpc that uses the communication protocol
// as defined in net/rpc package.
type goRpc struct{}

// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
var GoRpc goRpc

// ServerCodec returns a net/rpc-style server codec over conn using handle h.
func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
	return &goRpcCodec{newRPCCodec(conn, h)}
}

// ClientCodec returns a net/rpc-style client codec over conn using handle h.
func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
	return &goRpcCodec{newRPCCodec(conn, h)}
}

var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered
|
541
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/simple.go
generated
vendored
Normal file
541
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/simple.go
generated
vendored
Normal file
@ -0,0 +1,541 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Descriptor byte values for the "simple" format. Multi-width families
// reserve a run of consecutive values, selected by payload width.
const (
	_ uint8 = iota
	simpleVdNil     = 1
	simpleVdFalse   = 2
	simpleVdTrue    = 3
	simpleVdFloat32 = 4
	simpleVdFloat64 = 5

	// each lasts for 4 (ie n, n+1, n+2, n+3)
	simpleVdPosInt = 8
	simpleVdNegInt = 12

	// containers: each type reserves 8 values (n .. n+7); encLen currently
	// uses n .. n+4 (zero-length, uint8, uint16, uint32, uint64 lengths).
	simpleVdString    = 216
	simpleVdByteArray = 224
	simpleVdArray     = 232
	simpleVdMap       = 240
	simpleVdExt       = 248
)
|
||||||
|
|
||||||
|
// simpleEncDriver is the encDriver for the "simple" binary format.
type simpleEncDriver struct {
	noBuiltInTypes
	encDriverNoopContainerWriter
	// encNoSeparator
	e *Encoder
	h *SimpleHandle
	w encWriter
	b [8]byte // scratch space for big-endian length/value encoding
}
|
||||||
|
|
||||||
|
func (e *simpleEncDriver) EncodeNil() {
|
||||||
|
e.w.writen1(simpleVdNil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *simpleEncDriver) EncodeBool(b bool) {
|
||||||
|
if b {
|
||||||
|
e.w.writen1(simpleVdTrue)
|
||||||
|
} else {
|
||||||
|
e.w.writen1(simpleVdFalse)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeFloat32 writes the float32 descriptor followed by the IEEE-754
// bits in big-endian order.
func (e *simpleEncDriver) EncodeFloat32(f float32) {
	e.w.writen1(simpleVdFloat32)
	bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
}

// EncodeFloat64 writes the float64 descriptor followed by the IEEE-754
// bits in big-endian order.
func (e *simpleEncDriver) EncodeFloat64(f float64) {
	e.w.writen1(simpleVdFloat64)
	bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f))
}
|
||||||
|
|
||||||
|
func (e *simpleEncDriver) EncodeInt(v int64) {
|
||||||
|
if v < 0 {
|
||||||
|
e.encUint(uint64(-v), simpleVdNegInt)
|
||||||
|
} else {
|
||||||
|
e.encUint(uint64(v), simpleVdPosInt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *simpleEncDriver) EncodeUint(v uint64) {
|
||||||
|
e.encUint(v, simpleVdPosInt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
|
||||||
|
if v <= math.MaxUint8 {
|
||||||
|
e.w.writen2(bd, uint8(v))
|
||||||
|
} else if v <= math.MaxUint16 {
|
||||||
|
e.w.writen1(bd + 1)
|
||||||
|
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
|
||||||
|
} else if v <= math.MaxUint32 {
|
||||||
|
e.w.writen1(bd + 2)
|
||||||
|
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
|
||||||
|
} else { // if v <= math.MaxUint64 {
|
||||||
|
e.w.writen1(bd + 3)
|
||||||
|
bigenHelper{e.b[:8], e.w}.writeUint64(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *simpleEncDriver) encLen(bd byte, length int) {
|
||||||
|
if length == 0 {
|
||||||
|
e.w.writen1(bd)
|
||||||
|
} else if length <= math.MaxUint8 {
|
||||||
|
e.w.writen1(bd + 1)
|
||||||
|
e.w.writen1(uint8(length))
|
||||||
|
} else if length <= math.MaxUint16 {
|
||||||
|
e.w.writen1(bd + 2)
|
||||||
|
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length))
|
||||||
|
} else if int64(length) <= math.MaxUint32 {
|
||||||
|
e.w.writen1(bd + 3)
|
||||||
|
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length))
|
||||||
|
} else {
|
||||||
|
e.w.writen1(bd + 4)
|
||||||
|
bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
|
||||||
|
bs := ext.WriteExt(rv)
|
||||||
|
if bs == nil {
|
||||||
|
e.EncodeNil()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
e.encodeExtPreamble(uint8(xtag), len(bs))
|
||||||
|
e.w.writeb(bs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
|
||||||
|
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
|
||||||
|
e.w.writeb(re.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
|
||||||
|
e.encLen(simpleVdExt, length)
|
||||||
|
e.w.writen1(xtag)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteArrayStart writes the array descriptor and element count.
func (e *simpleEncDriver) WriteArrayStart(length int) {
	e.encLen(simpleVdArray, length)
}

// WriteMapStart writes the map descriptor and entry count.
func (e *simpleEncDriver) WriteMapStart(length int) {
	e.encLen(simpleVdMap, length)
}

// EncodeString writes a string as descriptor+length followed by its bytes.
func (e *simpleEncDriver) EncodeString(c charEncoding, v string) {
	e.encLen(simpleVdString, len(v))
	e.w.writestr(v)
}

// EncodeSymbol encodes symbols as plain UTF-8 strings (no interning).
func (e *simpleEncDriver) EncodeSymbol(v string) {
	e.EncodeString(c_UTF8, v)
}

// EncodeStringBytes writes a byte array as descriptor+length plus payload.
func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
	e.encLen(simpleVdByteArray, len(v))
	e.w.writeb(v)
}
|
||||||
|
|
||||||
|
//------------------------------------
|
||||||
|
|
||||||
|
type simpleDecDriver struct {
|
||||||
|
d *Decoder
|
||||||
|
h *SimpleHandle
|
||||||
|
r decReader
|
||||||
|
bdRead bool
|
||||||
|
bd byte
|
||||||
|
br bool // bytes reader
|
||||||
|
b [scratchByteArrayLen]byte
|
||||||
|
noBuiltInTypes
|
||||||
|
// noStreamingCodec
|
||||||
|
decDriverNoopContainerReader
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) readNextBd() {
|
||||||
|
d.bd = d.r.readn1()
|
||||||
|
d.bdRead = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) uncacheRead() {
|
||||||
|
if d.bdRead {
|
||||||
|
d.r.unreadn1()
|
||||||
|
d.bdRead = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) ContainerType() (vt valueType) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == simpleVdNil {
|
||||||
|
return valueTypeNil
|
||||||
|
} else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 ||
|
||||||
|
d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 {
|
||||||
|
return valueTypeBytes
|
||||||
|
} else if d.bd == simpleVdString || d.bd == simpleVdString+1 ||
|
||||||
|
d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 {
|
||||||
|
return valueTypeString
|
||||||
|
} else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 ||
|
||||||
|
d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 {
|
||||||
|
return valueTypeArray
|
||||||
|
} else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 ||
|
||||||
|
d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 {
|
||||||
|
return valueTypeMap
|
||||||
|
} else {
|
||||||
|
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
|
||||||
|
}
|
||||||
|
return valueTypeUnset
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) TryDecodeAsNil() bool {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == simpleVdNil {
|
||||||
|
d.bdRead = false
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
switch d.bd {
|
||||||
|
case simpleVdPosInt:
|
||||||
|
ui = uint64(d.r.readn1())
|
||||||
|
case simpleVdPosInt + 1:
|
||||||
|
ui = uint64(bigen.Uint16(d.r.readx(2)))
|
||||||
|
case simpleVdPosInt + 2:
|
||||||
|
ui = uint64(bigen.Uint32(d.r.readx(4)))
|
||||||
|
case simpleVdPosInt + 3:
|
||||||
|
ui = uint64(bigen.Uint64(d.r.readx(8)))
|
||||||
|
case simpleVdNegInt:
|
||||||
|
ui = uint64(d.r.readn1())
|
||||||
|
neg = true
|
||||||
|
case simpleVdNegInt + 1:
|
||||||
|
ui = uint64(bigen.Uint16(d.r.readx(2)))
|
||||||
|
neg = true
|
||||||
|
case simpleVdNegInt + 2:
|
||||||
|
ui = uint64(bigen.Uint32(d.r.readx(4)))
|
||||||
|
neg = true
|
||||||
|
case simpleVdNegInt + 3:
|
||||||
|
ui = uint64(bigen.Uint64(d.r.readx(8)))
|
||||||
|
neg = true
|
||||||
|
default:
|
||||||
|
d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// don't do this check, because callers may only want the unsigned value.
|
||||||
|
// if ui > math.MaxInt64 {
|
||||||
|
// d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
|
||||||
|
// return
|
||||||
|
// }
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) {
|
||||||
|
ui, neg := d.decCheckInteger()
|
||||||
|
i, overflow := chkOvf.SignedInt(ui)
|
||||||
|
if overflow {
|
||||||
|
d.d.errorf("simple: overflow converting %v to signed integer", ui)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if neg {
|
||||||
|
i = -i
|
||||||
|
}
|
||||||
|
if chkOvf.Int(i, bitsize) {
|
||||||
|
d.d.errorf("simple: overflow integer: %v", i)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
|
||||||
|
ui, neg := d.decCheckInteger()
|
||||||
|
if neg {
|
||||||
|
d.d.errorf("Assigning negative signed value to unsigned type")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if chkOvf.Uint(ui, bitsize) {
|
||||||
|
d.d.errorf("simple: overflow integer: %v", ui)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == simpleVdFloat32 {
|
||||||
|
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
|
||||||
|
} else if d.bd == simpleVdFloat64 {
|
||||||
|
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
|
||||||
|
} else {
|
||||||
|
if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
|
||||||
|
f = float64(d.DecodeInt(64))
|
||||||
|
} else {
|
||||||
|
d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if chkOverflow32 && chkOvf.Float32(f) {
|
||||||
|
d.d.errorf("msgpack: float32 overflow: %v", f)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// bool can be decoded from bool only (single byte).
|
||||||
|
func (d *simpleDecDriver) DecodeBool() (b bool) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == simpleVdTrue {
|
||||||
|
b = true
|
||||||
|
} else if d.bd == simpleVdFalse {
|
||||||
|
} else {
|
||||||
|
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) ReadMapStart() (length int) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return d.decLen()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) ReadArrayStart() (length int) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return d.decLen()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) decLen() int {
|
||||||
|
switch d.bd % 8 {
|
||||||
|
case 0:
|
||||||
|
return 0
|
||||||
|
case 1:
|
||||||
|
return int(d.r.readn1())
|
||||||
|
case 2:
|
||||||
|
return int(bigen.Uint16(d.r.readx(2)))
|
||||||
|
case 3:
|
||||||
|
ui := uint64(bigen.Uint32(d.r.readx(4)))
|
||||||
|
if chkOvf.Uint(ui, intBitsize) {
|
||||||
|
d.d.errorf("simple: overflow integer: %v", ui)
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return int(ui)
|
||||||
|
case 4:
|
||||||
|
ui := bigen.Uint64(d.r.readx(8))
|
||||||
|
if chkOvf.Uint(ui, intBitsize) {
|
||||||
|
d.d.errorf("simple: overflow integer: %v", ui)
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return int(ui)
|
||||||
|
}
|
||||||
|
d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) DecodeString() (s string) {
|
||||||
|
return string(d.DecodeBytes(d.b[:], true))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) {
|
||||||
|
return d.DecodeBytes(d.b[:], true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
if d.bd == simpleVdNil {
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
clen := d.decLen()
|
||||||
|
d.bdRead = false
|
||||||
|
if zerocopy {
|
||||||
|
if d.br {
|
||||||
|
return d.r.readx(clen)
|
||||||
|
} else if len(bs) == 0 {
|
||||||
|
bs = d.b[:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
|
||||||
|
if xtag > 0xff {
|
||||||
|
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
|
||||||
|
realxtag = uint64(realxtag1)
|
||||||
|
if ext == nil {
|
||||||
|
re := rv.(*RawExt)
|
||||||
|
re.Tag = realxtag
|
||||||
|
re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
|
||||||
|
} else {
|
||||||
|
ext.ReadExt(rv, xbs)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
switch d.bd {
|
||||||
|
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
|
||||||
|
l := d.decLen()
|
||||||
|
xtag = d.r.readn1()
|
||||||
|
if verifyTag && xtag != tag {
|
||||||
|
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
xbs = d.r.readx(l)
|
||||||
|
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
|
||||||
|
xbs = d.DecodeBytes(nil, true)
|
||||||
|
default:
|
||||||
|
d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.bdRead = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) DecodeNaked() {
|
||||||
|
if !d.bdRead {
|
||||||
|
d.readNextBd()
|
||||||
|
}
|
||||||
|
|
||||||
|
n := d.d.n
|
||||||
|
var decodeFurther bool
|
||||||
|
|
||||||
|
switch d.bd {
|
||||||
|
case simpleVdNil:
|
||||||
|
n.v = valueTypeNil
|
||||||
|
case simpleVdFalse:
|
||||||
|
n.v = valueTypeBool
|
||||||
|
n.b = false
|
||||||
|
case simpleVdTrue:
|
||||||
|
n.v = valueTypeBool
|
||||||
|
n.b = true
|
||||||
|
case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
|
||||||
|
if d.h.SignedInteger {
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = d.DecodeInt(64)
|
||||||
|
} else {
|
||||||
|
n.v = valueTypeUint
|
||||||
|
n.u = d.DecodeUint(64)
|
||||||
|
}
|
||||||
|
case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
|
||||||
|
n.v = valueTypeInt
|
||||||
|
n.i = d.DecodeInt(64)
|
||||||
|
case simpleVdFloat32:
|
||||||
|
n.v = valueTypeFloat
|
||||||
|
n.f = d.DecodeFloat(true)
|
||||||
|
case simpleVdFloat64:
|
||||||
|
n.v = valueTypeFloat
|
||||||
|
n.f = d.DecodeFloat(false)
|
||||||
|
case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
|
||||||
|
n.v = valueTypeString
|
||||||
|
n.s = d.DecodeString()
|
||||||
|
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
|
||||||
|
n.v = valueTypeBytes
|
||||||
|
n.l = d.DecodeBytes(nil, false)
|
||||||
|
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
|
||||||
|
n.v = valueTypeExt
|
||||||
|
l := d.decLen()
|
||||||
|
n.u = uint64(d.r.readn1())
|
||||||
|
n.l = d.r.readx(l)
|
||||||
|
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
|
||||||
|
n.v = valueTypeArray
|
||||||
|
decodeFurther = true
|
||||||
|
case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
|
||||||
|
n.v = valueTypeMap
|
||||||
|
decodeFurther = true
|
||||||
|
default:
|
||||||
|
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !decodeFurther {
|
||||||
|
d.bdRead = false
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
//------------------------------------
|
||||||
|
|
||||||
|
// SimpleHandle is a Handle for a very simple encoding format.
|
||||||
|
//
|
||||||
|
// simple is a simplistic codec similar to binc, but not as compact.
|
||||||
|
// - Encoding of a value is always preceded by the descriptor byte (bd)
|
||||||
|
// - True, false, nil are encoded fully in 1 byte (the descriptor)
|
||||||
|
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
|
||||||
|
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
|
||||||
|
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
|
||||||
|
// - Lenght of containers (strings, bytes, array, map, extensions)
|
||||||
|
// are encoded in 0, 1, 2, 4 or 8 bytes.
|
||||||
|
// Zero-length containers have no length encoded.
|
||||||
|
// For others, the number of bytes is given by pow(2, bd%3)
|
||||||
|
// - maps are encoded as [bd] [length] [[key][value]]...
|
||||||
|
// - arrays are encoded as [bd] [length] [value]...
|
||||||
|
// - extensions are encoded as [bd] [length] [tag] [byte]...
|
||||||
|
// - strings/bytearrays are encoded as [bd] [length] [byte]...
|
||||||
|
//
|
||||||
|
// The full spec will be published soon.
|
||||||
|
type SimpleHandle struct {
|
||||||
|
BasicHandle
|
||||||
|
binaryEncodingType
|
||||||
|
noElemSeparators
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
|
||||||
|
return h.SetExt(rt, tag, &setExtWrapper{b: ext})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
|
||||||
|
return &simpleEncDriver{e: e, w: e.w, h: h}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
|
||||||
|
return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *simpleEncDriver) reset() {
|
||||||
|
e.w = e.e.w
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) reset() {
|
||||||
|
d.r, d.br = d.d.r, d.d.bytes
|
||||||
|
d.bd, d.bdRead = 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ decDriver = (*simpleDecDriver)(nil)
|
||||||
|
var _ encDriver = (*simpleEncDriver)(nil)
|
220
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/time.go
generated
vendored
Normal file
220
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/time.go
generated
vendored
Normal file
@ -0,0 +1,220 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
||||||
|
|
||||||
|
type timeExt struct{}
|
||||||
|
|
||||||
|
func (x timeExt) WriteExt(v interface{}) (bs []byte) {
|
||||||
|
switch v2 := v.(type) {
|
||||||
|
case time.Time:
|
||||||
|
bs = encodeTime(v2)
|
||||||
|
case *time.Time:
|
||||||
|
bs = encodeTime(*v2)
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
func (x timeExt) ReadExt(v interface{}, bs []byte) {
|
||||||
|
tt, err := decodeTime(bs)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
*(v.(*time.Time)) = tt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x timeExt) ConvertExt(v interface{}) interface{} {
|
||||||
|
return x.WriteExt(v)
|
||||||
|
}
|
||||||
|
func (x timeExt) UpdateExt(v interface{}, src interface{}) {
|
||||||
|
x.ReadExt(v, src.([]byte))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeTime encodes a time.Time as a []byte, including
|
||||||
|
// information on the instant in time and UTC offset.
|
||||||
|
//
|
||||||
|
// Format Description
|
||||||
|
//
|
||||||
|
// A timestamp is composed of 3 components:
|
||||||
|
//
|
||||||
|
// - secs: signed integer representing seconds since unix epoch
|
||||||
|
// - nsces: unsigned integer representing fractional seconds as a
|
||||||
|
// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
|
||||||
|
// - tz: signed integer representing timezone offset in minutes east of UTC,
|
||||||
|
// and a dst (daylight savings time) flag
|
||||||
|
//
|
||||||
|
// When encoding a timestamp, the first byte is the descriptor, which
|
||||||
|
// defines which components are encoded and how many bytes are used to
|
||||||
|
// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
|
||||||
|
// is not encoded in the byte array explicitly*.
|
||||||
|
//
|
||||||
|
// Descriptor 8 bits are of the form `A B C DDD EE`:
|
||||||
|
// A: Is secs component encoded? 1 = true
|
||||||
|
// B: Is nsecs component encoded? 1 = true
|
||||||
|
// C: Is tz component encoded? 1 = true
|
||||||
|
// DDD: Number of extra bytes for secs (range 0-7).
|
||||||
|
// If A = 1, secs encoded in DDD+1 bytes.
|
||||||
|
// If A = 0, secs is not encoded, and is assumed to be 0.
|
||||||
|
// If A = 1, then we need at least 1 byte to encode secs.
|
||||||
|
// DDD says the number of extra bytes beyond that 1.
|
||||||
|
// E.g. if DDD=0, then secs is represented in 1 byte.
|
||||||
|
// if DDD=2, then secs is represented in 3 bytes.
|
||||||
|
// EE: Number of extra bytes for nsecs (range 0-3).
|
||||||
|
// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
|
||||||
|
//
|
||||||
|
// Following the descriptor bytes, subsequent bytes are:
|
||||||
|
//
|
||||||
|
// secs component encoded in `DDD + 1` bytes (if A == 1)
|
||||||
|
// nsecs component encoded in `EE + 1` bytes (if B == 1)
|
||||||
|
// tz component encoded in 2 bytes (if C == 1)
|
||||||
|
//
|
||||||
|
// secs and nsecs components are integers encoded in a BigEndian
|
||||||
|
// 2-complement encoding format.
|
||||||
|
//
|
||||||
|
// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
|
||||||
|
// Least significant bit 0 are described below:
|
||||||
|
//
|
||||||
|
// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
|
||||||
|
// Bit 15 = have\_dst: set to 1 if we set the dst flag.
|
||||||
|
// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not.
|
||||||
|
// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
|
||||||
|
//
|
||||||
|
func encodeTime(t time.Time) []byte {
|
||||||
|
//t := rv.Interface().(time.Time)
|
||||||
|
tsecs, tnsecs := t.Unix(), t.Nanosecond()
|
||||||
|
var (
|
||||||
|
bd byte
|
||||||
|
btmp [8]byte
|
||||||
|
bs [16]byte
|
||||||
|
i int = 1
|
||||||
|
)
|
||||||
|
l := t.Location()
|
||||||
|
if l == time.UTC {
|
||||||
|
l = nil
|
||||||
|
}
|
||||||
|
if tsecs != 0 {
|
||||||
|
bd = bd | 0x80
|
||||||
|
bigen.PutUint64(btmp[:], uint64(tsecs))
|
||||||
|
f := pruneSignExt(btmp[:], tsecs >= 0)
|
||||||
|
bd = bd | (byte(7-f) << 2)
|
||||||
|
copy(bs[i:], btmp[f:])
|
||||||
|
i = i + (8 - f)
|
||||||
|
}
|
||||||
|
if tnsecs != 0 {
|
||||||
|
bd = bd | 0x40
|
||||||
|
bigen.PutUint32(btmp[:4], uint32(tnsecs))
|
||||||
|
f := pruneSignExt(btmp[:4], true)
|
||||||
|
bd = bd | byte(3-f)
|
||||||
|
copy(bs[i:], btmp[f:4])
|
||||||
|
i = i + (4 - f)
|
||||||
|
}
|
||||||
|
if l != nil {
|
||||||
|
bd = bd | 0x20
|
||||||
|
// Note that Go Libs do not give access to dst flag.
|
||||||
|
_, zoneOffset := t.Zone()
|
||||||
|
//zoneName, zoneOffset := t.Zone()
|
||||||
|
zoneOffset /= 60
|
||||||
|
z := uint16(zoneOffset)
|
||||||
|
bigen.PutUint16(btmp[:2], z)
|
||||||
|
// clear dst flags
|
||||||
|
bs[i] = btmp[0] & 0x3f
|
||||||
|
bs[i+1] = btmp[1]
|
||||||
|
i = i + 2
|
||||||
|
}
|
||||||
|
bs[0] = bd
|
||||||
|
return bs[0:i]
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeTime decodes a []byte into a time.Time.
|
||||||
|
func decodeTime(bs []byte) (tt time.Time, err error) {
|
||||||
|
bd := bs[0]
|
||||||
|
var (
|
||||||
|
tsec int64
|
||||||
|
tnsec uint32
|
||||||
|
tz uint16
|
||||||
|
i byte = 1
|
||||||
|
i2 byte
|
||||||
|
n byte
|
||||||
|
)
|
||||||
|
if bd&(1<<7) != 0 {
|
||||||
|
var btmp [8]byte
|
||||||
|
n = ((bd >> 2) & 0x7) + 1
|
||||||
|
i2 = i + n
|
||||||
|
copy(btmp[8-n:], bs[i:i2])
|
||||||
|
//if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
|
||||||
|
if bs[i]&(1<<7) != 0 {
|
||||||
|
copy(btmp[0:8-n], bsAll0xff)
|
||||||
|
//for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
|
||||||
|
}
|
||||||
|
i = i2
|
||||||
|
tsec = int64(bigen.Uint64(btmp[:]))
|
||||||
|
}
|
||||||
|
if bd&(1<<6) != 0 {
|
||||||
|
var btmp [4]byte
|
||||||
|
n = (bd & 0x3) + 1
|
||||||
|
i2 = i + n
|
||||||
|
copy(btmp[4-n:], bs[i:i2])
|
||||||
|
i = i2
|
||||||
|
tnsec = bigen.Uint32(btmp[:])
|
||||||
|
}
|
||||||
|
if bd&(1<<5) == 0 {
|
||||||
|
tt = time.Unix(tsec, int64(tnsec)).UTC()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
|
||||||
|
// However, we need name here, so it can be shown when time is printed.
|
||||||
|
// Zone name is in form: UTC-08:00.
|
||||||
|
// Note that Go Libs do not give access to dst flag, so we ignore dst bits
|
||||||
|
|
||||||
|
i2 = i + 2
|
||||||
|
tz = bigen.Uint16(bs[i:i2])
|
||||||
|
i = i2
|
||||||
|
// sign extend sign bit into top 2 MSB (which were dst bits):
|
||||||
|
if tz&(1<<13) == 0 { // positive
|
||||||
|
tz = tz & 0x3fff //clear 2 MSBs: dst bits
|
||||||
|
} else { // negative
|
||||||
|
tz = tz | 0xc000 //set 2 MSBs: dst bits
|
||||||
|
//tzname[3] = '-' (TODO: verify. this works here)
|
||||||
|
}
|
||||||
|
tzint := int16(tz)
|
||||||
|
if tzint == 0 {
|
||||||
|
tt = time.Unix(tsec, int64(tnsec)).UTC()
|
||||||
|
} else {
|
||||||
|
// For Go Time, do not use a descriptive timezone.
|
||||||
|
// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
|
||||||
|
// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
|
||||||
|
// var zoneName = timeLocUTCName(tzint)
|
||||||
|
tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// func timeLocUTCName(tzint int16) string {
|
||||||
|
// if tzint == 0 {
|
||||||
|
// return "UTC"
|
||||||
|
// }
|
||||||
|
// var tzname = []byte("UTC+00:00")
|
||||||
|
// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
|
||||||
|
// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
|
||||||
|
// var tzhr, tzmin int16
|
||||||
|
// if tzint < 0 {
|
||||||
|
// tzname[3] = '-' // (TODO: verify. this works here)
|
||||||
|
// tzhr, tzmin = -tzint/60, (-tzint)%60
|
||||||
|
// } else {
|
||||||
|
// tzhr, tzmin = tzint/60, tzint%60
|
||||||
|
// }
|
||||||
|
// tzname[4] = timeDigits[tzhr/10]
|
||||||
|
// tzname[5] = timeDigits[tzhr%10]
|
||||||
|
// tzname[7] = timeDigits[tzmin/10]
|
||||||
|
// tzname[8] = timeDigits[tzmin%10]
|
||||||
|
// return string(tzname)
|
||||||
|
// //return time.FixedZone(string(tzname), int(tzint)*60)
|
||||||
|
// }
|
426
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/xml.go
generated
vendored
Normal file
426
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/xml.go
generated
vendored
Normal file
@ -0,0 +1,426 @@
|
|||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "reflect"
|
||||||
|
|
||||||
|
/*
|
||||||
|
|
||||||
|
A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder.
|
||||||
|
|
||||||
|
We are attempting this due to perceived issues with encoding/xml:
|
||||||
|
- Complicated. It tried to do too much, and is not as simple to use as json.
|
||||||
|
- Due to over-engineering, reflection is over-used AND performance suffers:
|
||||||
|
java is 6X faster:http://fabsk.eu/blog/category/informatique/dev/golang/
|
||||||
|
even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html
|
||||||
|
|
||||||
|
codec framework will offer the following benefits
|
||||||
|
- VASTLY improved performance (when using reflection-mode or codecgen)
|
||||||
|
- simplicity and consistency: with the rest of the supported formats
|
||||||
|
- all other benefits of codec framework (streaming, codegeneration, etc)
|
||||||
|
|
||||||
|
codec is not a drop-in replacement for encoding/xml.
|
||||||
|
It is a replacement, based on the simplicity and performance of codec.
|
||||||
|
Look at it like JAXB for Go.
|
||||||
|
|
||||||
|
Challenges:
|
||||||
|
|
||||||
|
- Need to output XML preamble, with all namespaces at the right location in the output.
|
||||||
|
- Each "end" block is dynamic, so we need to maintain a context-aware stack
|
||||||
|
- How to decide when to use an attribute VS an element
|
||||||
|
- How to handle chardata, attr, comment EXPLICITLY.
|
||||||
|
- Should it output fragments?
|
||||||
|
e.g. encoding a bool should just output true OR false, which is not well-formed XML.
|
||||||
|
|
||||||
|
Extend the struct tag. See representative example:
|
||||||
|
type X struct {
|
||||||
|
ID uint8 codec:"xid|http://ugorji.net/x-namespace id,omitempty,toarray,attr,cdata"
|
||||||
|
}
|
||||||
|
|
||||||
|
Based on this, we encode
|
||||||
|
- fields as elements, BUT encode as attributes if struct tag contains ",attr".
|
||||||
|
- text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata".
|
||||||
|
|
||||||
|
In this mode, we only encode as attribute if ",attr" is found, and only encode as CDATA
|
||||||
|
if ",cdata" is found in the struct tag.
|
||||||
|
|
||||||
|
To handle namespaces:
|
||||||
|
- XMLHandle is denoted as being namespace-aware.
|
||||||
|
Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name.
|
||||||
|
- *Encoder and *Decoder know whether the Handle "prefers" namespaces.
|
||||||
|
- add *Encoder.getEncName(*structFieldInfo).
|
||||||
|
No one calls *structFieldInfo.indexForEncName directly anymore
|
||||||
|
- add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc
|
||||||
|
No one accesses .encName anymore except in
|
||||||
|
- let encode.go and decode.go use these (for consistency)
|
||||||
|
- only problem exists for gen.go, where we create a big switch on encName.
|
||||||
|
Now, we also have to add a switch on strings.endsWith(kName, encNsName)
|
||||||
|
- gen.go will need to have many more methods, and then double-on the 2 switch loops like:
|
||||||
|
switch k {
|
||||||
|
case "abc" : x.abc()
|
||||||
|
case "def" : x.def()
|
||||||
|
default {
|
||||||
|
switch {
|
||||||
|
case !nsAware: panic(...)
|
||||||
|
case strings.endsWith("nsabc"): x.abc()
|
||||||
|
default: panic(...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
The structure below accomodates this:
|
||||||
|
|
||||||
|
type typeInfo struct {
|
||||||
|
sfi []*structFieldInfo // sorted by encName
|
||||||
|
sfins // sorted by namespace
|
||||||
|
sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately.
|
||||||
|
sfip // unsorted
|
||||||
|
}
|
||||||
|
type structFieldInfo struct {
|
||||||
|
encName
|
||||||
|
nsEncName
|
||||||
|
ns string
|
||||||
|
attr bool
|
||||||
|
cdata bool
|
||||||
|
}
|
||||||
|
|
||||||
|
indexForEncName is now an internal helper function that takes a sorted array
|
||||||
|
(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...)
|
||||||
|
|
||||||
|
There will be a separate parser from the builder.
|
||||||
|
The parser will have a method: next() xmlToken method.
|
||||||
|
|
||||||
|
xmlToken has fields:
|
||||||
|
- type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text
|
||||||
|
- value string
|
||||||
|
- ns string
|
||||||
|
|
||||||
|
SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL
|
||||||
|
|
||||||
|
The following are skipped when parsing:
|
||||||
|
- External Entities (from external file)
|
||||||
|
- Notation Declaration e.g. <!NOTATION GIF87A SYSTEM "GIF">
|
||||||
|
- Entity Declarations & References
|
||||||
|
- XML Declaration (assume UTF-8)
|
||||||
|
- XML Directive i.e. <! ... >
|
||||||
|
- Other Declarations: Notation, etc.
|
||||||
|
- Comment
|
||||||
|
- Processing Instruction
|
||||||
|
- schema / DTD for validation:
|
||||||
|
We are not a VALIDATING parser. Validation is done elsewhere.
|
||||||
|
However, some parts of the DTD internal subset are used (SEE BELOW).
|
||||||
|
For Attribute List Declarations e.g.
|
||||||
|
<!ATTLIST foo:oldjoke name ID #REQUIRED label CDATA #IMPLIED status ( funny | notfunny ) 'funny' >
|
||||||
|
We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED)
|
||||||
|
|
||||||
|
The following XML features are supported
|
||||||
|
- Namespace
|
||||||
|
- Element
|
||||||
|
- Attribute
|
||||||
|
- cdata
|
||||||
|
- Unicode escape
|
||||||
|
|
||||||
|
The following DTD (when as an internal sub-set) features are supported:
|
||||||
|
- Internal Entities e.g.
|
||||||
|
<!ELEMENT burns "ugorji is cool" > AND entities for the set: [<>&"']
|
||||||
|
- Parameter entities e.g.
|
||||||
|
<!ENTITY % personcontent "ugorji is cool"> <!ELEMENT burns (%personcontent;)*>
|
||||||
|
|
||||||
|
At decode time, a structure containing the following is kept
|
||||||
|
- namespace mapping
|
||||||
|
- default attribute values
|
||||||
|
- all internal entities (<>&"' and others written in the document)
|
||||||
|
|
||||||
|
When decode starts, it parses XML namespace declarations and creates a map in the
|
||||||
|
xmlDecDriver. While parsing, that map continously gets updated.
|
||||||
|
The only problem happens when a namespace declaration happens on the node that it defines.
|
||||||
|
e.g. <hn:name xmlns:hn="http://www.ugorji.net" >
|
||||||
|
To handle this, each Element must be fully parsed at a time,
|
||||||
|
even if it amounts to multiple tokens which are returned one at a time on request.
|
||||||
|
|
||||||
|
xmlns is a special attribute name.
|
||||||
|
- It is used to define namespaces, including the default
|
||||||
|
- It is never returned as an AttrKey or AttrVal.
|
||||||
|
*We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.*
|
||||||
|
|
||||||
|
Number, bool, null, mapKey, etc can all be decoded from any xmlToken.
|
||||||
|
This accomodates map[int]string for example.
|
||||||
|
|
||||||
|
It should be possible to create a schema from the types,
|
||||||
|
or vice versa (generate types from schema with appropriate tags).
|
||||||
|
This is however out-of-scope from this parsing project.
|
||||||
|
|
||||||
|
We should write all namespace information at the first point that it is referenced in the tree,
|
||||||
|
and use the mapping for all child nodes and attributes. This means that state is maintained
|
||||||
|
at a point in the tree. This also means that calls to Decode or MustDecode will reset some state.
|
||||||
|
|
||||||
|
When decoding, it is important to keep track of entity references and default attribute values.
|
||||||
|
It seems these can only be stored in the DTD components. We should honor them when decoding.
|
||||||
|
|
||||||
|
Configuration for XMLHandle will look like this:
|
||||||
|
|
||||||
|
XMLHandle
|
||||||
|
DefaultNS string
|
||||||
|
// Encoding:
|
||||||
|
NS map[string]string // ns URI to key, used for encoding
|
||||||
|
// Decoding: in case ENTITY declared in external schema or dtd, store info needed here
|
||||||
|
Entities map[string]string // map of entity rep to character
|
||||||
|
|
||||||
|
|
||||||
|
During encode, if a namespace mapping is not defined for a namespace found on a struct,
|
||||||
|
then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict
|
||||||
|
with any other namespace mapping).
|
||||||
|
|
||||||
|
Note that different fields in a struct can have different namespaces.
|
||||||
|
However, all fields will default to the namespace on the _struct field (if defined).
|
||||||
|
|
||||||
|
An XML document is a name, a map of attributes and a list of children.
|
||||||
|
Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example).
|
||||||
|
We have to "DecodeNaked" into something that resembles XML data.
|
||||||
|
|
||||||
|
To support DecodeNaked (decode into nil interface{}) we have to define some "supporting" types:
|
||||||
|
type Name struct { // Prefered. Less allocations due to conversions.
|
||||||
|
Local string
|
||||||
|
Space string
|
||||||
|
}
|
||||||
|
type Element struct {
|
||||||
|
Name Name
|
||||||
|
Attrs map[Name]string
|
||||||
|
Children []interface{} // each child is either *Element or string
|
||||||
|
}
|
||||||
|
Only two "supporting" types are exposed for XML: Name and Element.
|
||||||
|
|
||||||
|
We considered 'type Name string' where Name is like "Space Local" (space-separated).
|
||||||
|
We decided against it, because each creation of a name would lead to
|
||||||
|
double allocation (first convert []byte to string, then concatenate them into a string).
|
||||||
|
The benefit is that it is faster to read Attrs from a map. But given that Element is a value
|
||||||
|
object, we want to eschew methods and have public exposed variables.
|
||||||
|
|
||||||
|
We also considered the following, where xml types were not value objects, and we used
|
||||||
|
intelligent accessor methods to extract information and for performance.
|
||||||
|
*** WE DECIDED AGAINST THIS. ***
|
||||||
|
type Attr struct {
|
||||||
|
Name Name
|
||||||
|
Value string
|
||||||
|
}
|
||||||
|
// Element is a ValueObject: There are no accessor methods.
|
||||||
|
// Make element self-contained.
|
||||||
|
type Element struct {
|
||||||
|
Name Name
|
||||||
|
attrsMap map[string]string // where key is "Space Local"
|
||||||
|
attrs []Attr
|
||||||
|
childrenT []string
|
||||||
|
childrenE []Element
|
||||||
|
childrenI []int // each child is a index into T or E.
|
||||||
|
}
|
||||||
|
func (x *Element) child(i) interface{} // returns string or *Element
|
||||||
|
|
||||||
|
Per XML spec and our default handling, white space is insignificant between elements,
|
||||||
|
specifically between parent-child or siblings. White space occuring alone between start
|
||||||
|
and end element IS significant. However, if xml:space='preserve', then we 'preserve'
|
||||||
|
all whitespace. This is more critical when doing a DecodeNaked, but MAY not be as critical
|
||||||
|
when decoding into a typed value.
|
||||||
|
|
||||||
|
**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.**
|
||||||
|
**So treat them as just "directives" that should be interpreted to mean something**.
|
||||||
|
|
||||||
|
On encoding, we don't add any prettifying markup (indenting, etc).
|
||||||
|
|
||||||
|
A document or element can only be encoded/decoded from/to a struct. In this mode:
|
||||||
|
- struct name maps to element name (or tag-info from _struct field)
|
||||||
|
- fields are mapped to child elements or attributes
|
||||||
|
|
||||||
|
A map is either encoded as attributes on current element, or as a set of child elements.
|
||||||
|
Maps are encoded as attributes iff their keys and values are primitives (number, bool, string).
|
||||||
|
|
||||||
|
A list is encoded as a set of child elements.
|
||||||
|
|
||||||
|
Primitives (number, bool, string) are encoded as an element, attribute or text
|
||||||
|
depending on the context.
|
||||||
|
|
||||||
|
Extensions must encode themselves as a text string.
|
||||||
|
|
||||||
|
Encoding is tough, specifically when encoding mappings, because we need to encode
|
||||||
|
as either attribute or element. To do this, we need to default to encoding as attributes,
|
||||||
|
and then let Encoder inform the Handle when to start encoding as nodes.
|
||||||
|
i.e. Encoder does something like:
|
||||||
|
|
||||||
|
h.EncodeMapStart()
|
||||||
|
h.Encode(), h.Encode(), ...
|
||||||
|
h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal
|
||||||
|
h.Encode(), h.Encode(), ...
|
||||||
|
h.EncodeEnd()
|
||||||
|
|
||||||
|
Only XMLHandle understands this, and will set itself to start encoding as elements.
|
||||||
|
|
||||||
|
This support extends to maps. For example, if a struct field is a map, and it has
|
||||||
|
the struct tag signifying it should be attr, then all its fields are encoded as attributes.
|
||||||
|
e.g.
|
||||||
|
|
||||||
|
type X struct {
|
||||||
|
M map[string]int `codec:"m,attr"` // encode as attributes
|
||||||
|
}
|
||||||
|
|
||||||
|
Question:
|
||||||
|
- if encoding a map, what if map keys have spaces in them???
|
||||||
|
Then they cannot be attributes or child elements. Error.
|
||||||
|
|
||||||
|
Misc:
|
||||||
|
|
||||||
|
- For attribute values, normalize by trimming beginning and ending white space,
|
||||||
|
and converting every white space sequence to a single space.
|
||||||
|
- ATTLIST restrictions are enforced.
|
||||||
|
e.g. default value of xml:space, skipping xml:XYZ style attributes, etc.
|
||||||
|
- Consider supporting NON-STRICT mode (e.g. to handle HTML parsing).
|
||||||
|
Some elements e.g. br, hr, etc need not close and should be auto-closed
|
||||||
|
... (see http://www.w3.org/TR/html4/loose.dtd)
|
||||||
|
An expansive set of entities are pre-defined.
|
||||||
|
- Have easy way to create a HTML parser:
|
||||||
|
add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose,
|
||||||
|
and add HTML Entities to the list.
|
||||||
|
- Support validating element/attribute XMLName before writing it.
|
||||||
|
Keep this behind a flag, which is set to false by default (for performance).
|
||||||
|
type XMLHandle struct {
|
||||||
|
CheckName bool
|
||||||
|
}
|
||||||
|
|
||||||
|
ROADMAP (1 weeks):
|
||||||
|
- build encoder (1 day)
|
||||||
|
- build decoder (based off xmlParser) (1 day)
|
||||||
|
- implement xmlParser (2 days).
|
||||||
|
Look at encoding/xml for inspiration.
|
||||||
|
- integrate and TEST (1 days)
|
||||||
|
- write article and post it (1 day)
|
||||||
|
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
// ----------- PARSER -------------------
|
||||||
|
|
||||||
|
type xmlTokenType uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
_ xmlTokenType = iota << 1
|
||||||
|
xmlTokenElemStart
|
||||||
|
xmlTokenElemEnd
|
||||||
|
xmlTokenAttrKey
|
||||||
|
xmlTokenAttrVal
|
||||||
|
xmlTokenText
|
||||||
|
)
|
||||||
|
|
||||||
|
type xmlToken struct {
|
||||||
|
Type xmlTokenType
|
||||||
|
Value string
|
||||||
|
Namespace string // blank for AttrVal and Text
|
||||||
|
}
|
||||||
|
|
||||||
|
type xmlParser struct {
|
||||||
|
r decReader
|
||||||
|
toks []xmlToken // list of tokens.
|
||||||
|
ptr int // ptr into the toks slice
|
||||||
|
done bool // nothing else to parse. r now returns EOF.
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *xmlParser) next() (t *xmlToken) {
|
||||||
|
// once x.done, or x.ptr == len(x.toks) == 0, then return nil (to signify finish)
|
||||||
|
if !x.done && len(x.toks) == 0 {
|
||||||
|
x.nextTag()
|
||||||
|
}
|
||||||
|
// parses one element at a time (into possible many tokens)
|
||||||
|
if x.ptr < len(x.toks) {
|
||||||
|
t = &(x.toks[x.ptr])
|
||||||
|
x.ptr++
|
||||||
|
if x.ptr == len(x.toks) {
|
||||||
|
x.ptr = 0
|
||||||
|
x.toks = x.toks[:0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// nextTag will parses the next element and fill up toks.
|
||||||
|
// It set done flag if/once EOF is reached.
|
||||||
|
func (x *xmlParser) nextTag() {
|
||||||
|
// TODO: implement.
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------- ENCODER -------------------
|
||||||
|
|
||||||
|
type xmlEncDriver struct {
|
||||||
|
e *Encoder
|
||||||
|
w encWriter
|
||||||
|
h *XMLHandle
|
||||||
|
b [64]byte // scratch
|
||||||
|
bs []byte // scratch
|
||||||
|
// s jsonStack
|
||||||
|
noBuiltInTypes
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------- DECODER -------------------
|
||||||
|
|
||||||
|
type xmlDecDriver struct {
|
||||||
|
d *Decoder
|
||||||
|
h *XMLHandle
|
||||||
|
r decReader // *bytesDecReader decReader
|
||||||
|
ct valueType // container type. one of unset, array or map.
|
||||||
|
bstr [8]byte // scratch used for string \UXXX parsing
|
||||||
|
b [64]byte // scratch
|
||||||
|
|
||||||
|
// wsSkipped bool // whitespace skipped
|
||||||
|
|
||||||
|
// s jsonStack
|
||||||
|
|
||||||
|
noBuiltInTypes
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeNaked will decode into an XMLNode
|
||||||
|
|
||||||
|
// XMLName is a value object representing a namespace-aware NAME
|
||||||
|
type XMLName struct {
|
||||||
|
Local string
|
||||||
|
Space string
|
||||||
|
}
|
||||||
|
|
||||||
|
// XMLNode represents a "union" of the different types of XML Nodes.
|
||||||
|
// Only one of fields (Text or *Element) is set.
|
||||||
|
type XMLNode struct {
|
||||||
|
Element *Element
|
||||||
|
Text string
|
||||||
|
}
|
||||||
|
|
||||||
|
// XMLElement is a value object representing an fully-parsed XML element.
|
||||||
|
type XMLElement struct {
|
||||||
|
Name Name
|
||||||
|
Attrs map[XMLName]string
|
||||||
|
// Children is a list of child nodes, each being a *XMLElement or string
|
||||||
|
Children []XMLNode
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------- HANDLE -------------------
|
||||||
|
|
||||||
|
type XMLHandle struct {
|
||||||
|
BasicHandle
|
||||||
|
textEncodingType
|
||||||
|
|
||||||
|
DefaultNS string
|
||||||
|
NS map[string]string // ns URI to key, for encoding
|
||||||
|
Entities map[string]string // entity representation to string, for encoding.
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *XMLHandle) newEncDriver(e *Encoder) encDriver {
|
||||||
|
return &xmlEncDriver{e: e, w: e.w, h: h}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *XMLHandle) newDecDriver(d *Decoder) decDriver {
|
||||||
|
// d := xmlDecDriver{r: r.(*bytesDecReader), h: h}
|
||||||
|
hd := xmlDecDriver{d: d, r: d.r, h: h}
|
||||||
|
hd.n.bytes = d.b[:]
|
||||||
|
return &hd
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
|
||||||
|
return h.SetExt(rt, tag, &setExtWrapper{i: ext})
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ decDriver = (*xmlDecDriver)(nil)
|
||||||
|
var _ encDriver = (*xmlEncDriver)(nil)
|
23
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/z.go
generated
vendored
Normal file
23
vendor/github.com/coreos/etcd/vendor/github.com/ugorji/go/codec/z.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "sort"
|
||||||
|
|
||||||
|
// TODO: this is brittle, as it depends on z.go's init() being called last.
|
||||||
|
// The current build tools all honor that files are passed in lexical order.
|
||||||
|
// However, we should consider using an init_channel,
|
||||||
|
// that each person doing init will write to.
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if !useLookupRecognizedTypes {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sort.Sort(uintptrSlice(recognizedRtids))
|
||||||
|
sort.Sort(uintptrSlice(recognizedRtidPtrs))
|
||||||
|
recognizedRtidOrPtrs = make([]uintptr, len(recognizedRtids)+len(recognizedRtidPtrs))
|
||||||
|
copy(recognizedRtidOrPtrs, recognizedRtids)
|
||||||
|
copy(recognizedRtidOrPtrs[len(recognizedRtids):], recognizedRtidPtrs)
|
||||||
|
sort.Sort(uintptrSlice(recognizedRtidOrPtrs))
|
||||||
|
}
|
202
vendor/github.com/coreos/etcd/version/LICENSE
generated
vendored
Normal file
202
vendor/github.com/coreos/etcd/version/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
56
vendor/github.com/coreos/etcd/version/version.go
generated
vendored
Normal file
56
vendor/github.com/coreos/etcd/version/version.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
// Copyright 2015 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package version implements etcd version parsing and contains latest version
|
||||||
|
// information.
|
||||||
|
package version
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/coreos/go-semver/semver"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
|
||||||
|
MinClusterVersion = "3.0.0"
|
||||||
|
Version = "3.3.0+git"
|
||||||
|
APIVersion = "unknown"
|
||||||
|
|
||||||
|
// Git SHA Value will be set during build
|
||||||
|
GitSHA = "Not provided (use ./build instead of go build)"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
ver, err := semver.NewVersion(Version)
|
||||||
|
if err == nil {
|
||||||
|
APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Versions struct {
|
||||||
|
Server string `json:"etcdserver"`
|
||||||
|
Cluster string `json:"etcdcluster"`
|
||||||
|
// TODO: raft state machine version
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cluster only keeps the major.minor.
|
||||||
|
func Cluster(v string) string {
|
||||||
|
vs := strings.Split(v, ".")
|
||||||
|
if len(vs) <= 2 {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s.%s", vs[0], vs[1])
|
||||||
|
}
|
28
vendor/github.com/fsnotify/fsnotify/LICENSE
generated
vendored
Normal file
28
vendor/github.com/fsnotify/fsnotify/LICENSE
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||||
|
Copyright (c) 2012 fsnotify Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
37
vendor/github.com/fsnotify/fsnotify/fen.go
generated
vendored
Normal file
37
vendor/github.com/fsnotify/fsnotify/fen.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build solaris
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of files, delivering events to a channel.
|
||||||
|
type Watcher struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes all watches and closes the events channel.
|
||||||
|
func (w *Watcher) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add starts watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Add(name string) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove stops watching the the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Remove(name string) error {
|
||||||
|
return nil
|
||||||
|
}
|
66
vendor/github.com/fsnotify/fsnotify/fsnotify.go
generated
vendored
Normal file
66
vendor/github.com/fsnotify/fsnotify/fsnotify.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !plan9
|
||||||
|
|
||||||
|
// Package fsnotify provides a platform-independent interface for file system notifications.
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Event represents a single file system notification.
|
||||||
|
type Event struct {
|
||||||
|
Name string // Relative path to the file or directory.
|
||||||
|
Op Op // File operation that triggered the event.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Op describes a set of file operations.
|
||||||
|
type Op uint32
|
||||||
|
|
||||||
|
// These are the generalized file operations that can trigger a notification.
|
||||||
|
const (
|
||||||
|
Create Op = 1 << iota
|
||||||
|
Write
|
||||||
|
Remove
|
||||||
|
Rename
|
||||||
|
Chmod
|
||||||
|
)
|
||||||
|
|
||||||
|
func (op Op) String() string {
|
||||||
|
// Use a buffer for efficient string concatenation
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
|
||||||
|
if op&Create == Create {
|
||||||
|
buffer.WriteString("|CREATE")
|
||||||
|
}
|
||||||
|
if op&Remove == Remove {
|
||||||
|
buffer.WriteString("|REMOVE")
|
||||||
|
}
|
||||||
|
if op&Write == Write {
|
||||||
|
buffer.WriteString("|WRITE")
|
||||||
|
}
|
||||||
|
if op&Rename == Rename {
|
||||||
|
buffer.WriteString("|RENAME")
|
||||||
|
}
|
||||||
|
if op&Chmod == Chmod {
|
||||||
|
buffer.WriteString("|CHMOD")
|
||||||
|
}
|
||||||
|
if buffer.Len() == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return buffer.String()[1:] // Strip leading pipe
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a string representation of the event in the form
|
||||||
|
// "file: REMOVE|WRITE|..."
|
||||||
|
func (e Event) String() string {
|
||||||
|
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Common errors that can be reported by a watcher
|
||||||
|
var ErrEventOverflow = errors.New("fsnotify queue overflow")
|
337
vendor/github.com/fsnotify/fsnotify/inotify.go
generated
vendored
Normal file
337
vendor/github.com/fsnotify/fsnotify/inotify.go
generated
vendored
Normal file
@ -0,0 +1,337 @@
|
|||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of files, delivering events to a channel.
|
||||||
|
type Watcher struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
mu sync.Mutex // Map access
|
||||||
|
fd int
|
||||||
|
poller *fdPoller
|
||||||
|
watches map[string]*watch // Map of inotify watches (key: path)
|
||||||
|
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||||
|
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||||
|
doneResp chan struct{} // Channel to respond to Close
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
// Create inotify fd
|
||||||
|
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
|
||||||
|
if fd == -1 {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
// Create epoll
|
||||||
|
poller, err := newFdPoller(fd)
|
||||||
|
if err != nil {
|
||||||
|
unix.Close(fd)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
w := &Watcher{
|
||||||
|
fd: fd,
|
||||||
|
poller: poller,
|
||||||
|
watches: make(map[string]*watch),
|
||||||
|
paths: make(map[int]string),
|
||||||
|
Events: make(chan Event),
|
||||||
|
Errors: make(chan error),
|
||||||
|
done: make(chan struct{}),
|
||||||
|
doneResp: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Watcher) isClosed() bool {
|
||||||
|
select {
|
||||||
|
case <-w.done:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes all watches and closes the events channel.
|
||||||
|
func (w *Watcher) Close() error {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||||
|
close(w.done)
|
||||||
|
|
||||||
|
// Wake up goroutine
|
||||||
|
w.poller.wake()
|
||||||
|
|
||||||
|
// Wait for goroutine to close
|
||||||
|
<-w.doneResp
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add starts watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Add(name string) error {
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
if w.isClosed() {
|
||||||
|
return errors.New("inotify instance already closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||||
|
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||||
|
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||||
|
|
||||||
|
var flags uint32 = agnosticEvents
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
watchEntry := w.watches[name]
|
||||||
|
if watchEntry != nil {
|
||||||
|
flags |= watchEntry.flags | unix.IN_MASK_ADD
|
||||||
|
}
|
||||||
|
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||||
|
if wd == -1 {
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
|
||||||
|
if watchEntry == nil {
|
||||||
|
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||||
|
w.paths[wd] = name
|
||||||
|
} else {
|
||||||
|
watchEntry.wd = uint32(wd)
|
||||||
|
watchEntry.flags = flags
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove stops watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Remove(name string) error {
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
|
||||||
|
// Fetch the watch.
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
watch, ok := w.watches[name]
|
||||||
|
|
||||||
|
// Remove it from inotify.
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We successfully removed the watch if InotifyRmWatch doesn't return an
|
||||||
|
// error, we need to clean up our internal state to ensure it matches
|
||||||
|
// inotify's kernel state.
|
||||||
|
delete(w.paths, int(watch.wd))
|
||||||
|
delete(w.watches, name)
|
||||||
|
|
||||||
|
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||||
|
// the inotify will already have been removed.
|
||||||
|
// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
|
||||||
|
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
|
||||||
|
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
|
||||||
|
// by another thread and we have not received IN_IGNORE event.
|
||||||
|
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||||
|
if success == -1 {
|
||||||
|
// TODO: Perhaps it's not helpful to return an error here in every case.
|
||||||
|
// the only two possible errors are:
|
||||||
|
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
|
||||||
|
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
|
||||||
|
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
|
||||||
|
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type watch struct {
|
||||||
|
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||||
|
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents reads from the inotify file descriptor, converts the
|
||||||
|
// received events into Event objects and sends them via the Events channel
|
||||||
|
func (w *Watcher) readEvents() {
|
||||||
|
var (
|
||||||
|
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||||
|
n int // Number of bytes read with read()
|
||||||
|
errno error // Syscall errno
|
||||||
|
ok bool // For poller.wait
|
||||||
|
)
|
||||||
|
|
||||||
|
defer close(w.doneResp)
|
||||||
|
defer close(w.Errors)
|
||||||
|
defer close(w.Events)
|
||||||
|
defer unix.Close(w.fd)
|
||||||
|
defer w.poller.close()
|
||||||
|
|
||||||
|
for {
|
||||||
|
// See if we have been closed.
|
||||||
|
if w.isClosed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ok, errno = w.poller.wait()
|
||||||
|
if errno != nil {
|
||||||
|
select {
|
||||||
|
case w.Errors <- errno:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
n, errno = unix.Read(w.fd, buf[:])
|
||||||
|
// If a signal interrupted execution, see if we've been asked to close, and try again.
|
||||||
|
// http://man7.org/linux/man-pages/man7/signal.7.html :
|
||||||
|
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
|
||||||
|
if errno == unix.EINTR {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// unix.Read might have been woken up by Close. If so, we're done.
|
||||||
|
if w.isClosed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if n < unix.SizeofInotifyEvent {
|
||||||
|
var err error
|
||||||
|
if n == 0 {
|
||||||
|
// If EOF is received. This should really never happen.
|
||||||
|
err = io.EOF
|
||||||
|
} else if n < 0 {
|
||||||
|
// If an error occurred while reading.
|
||||||
|
err = errno
|
||||||
|
} else {
|
||||||
|
// Read was too short.
|
||||||
|
err = errors.New("notify: short read in readEvents()")
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case w.Errors <- err:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var offset uint32
|
||||||
|
// We don't know how many events we just read into the buffer
|
||||||
|
// While the offset points to at least one whole event...
|
||||||
|
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||||
|
// Point "raw" to the event in the buffer
|
||||||
|
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||||
|
|
||||||
|
mask := uint32(raw.Mask)
|
||||||
|
nameLen := uint32(raw.Len)
|
||||||
|
|
||||||
|
if mask&unix.IN_Q_OVERFLOW != 0 {
|
||||||
|
select {
|
||||||
|
case w.Errors <- ErrEventOverflow:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the event happened to the watched directory or the watched file, the kernel
|
||||||
|
// doesn't append the filename to the event, but we would like to always fill the
|
||||||
|
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||||
|
// the "paths" map.
|
||||||
|
w.mu.Lock()
|
||||||
|
name, ok := w.paths[int(raw.Wd)]
|
||||||
|
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
|
||||||
|
// This is a sign to clean up the maps, otherwise we are no longer in sync
|
||||||
|
// with the inotify kernel state which has already deleted the watch
|
||||||
|
// automatically.
|
||||||
|
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||||
|
delete(w.paths, int(raw.Wd))
|
||||||
|
delete(w.watches, name)
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
if nameLen > 0 {
|
||||||
|
// Point "bytes" at the first byte of the filename
|
||||||
|
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
|
||||||
|
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||||
|
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||||
|
}
|
||||||
|
|
||||||
|
event := newEvent(name, mask)
|
||||||
|
|
||||||
|
// Send the events that are not ignored on the events channel
|
||||||
|
if !event.ignoreLinux(mask) {
|
||||||
|
select {
|
||||||
|
case w.Events <- event:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to the next event in the buffer
|
||||||
|
offset += unix.SizeofInotifyEvent + nameLen
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Certain types of events can be "ignored" and not sent over the Events
|
||||||
|
// channel. Such as events marked ignore by the kernel, or MODIFY events
|
||||||
|
// against files that do not exist.
|
||||||
|
func (e *Event) ignoreLinux(mask uint32) bool {
|
||||||
|
// Ignore anything the inotify API says to ignore
|
||||||
|
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the event is not a DELETE or RENAME, the file must exist.
|
||||||
|
// Otherwise the event is ignored.
|
||||||
|
// *Note*: this was put in place because it was seen that a MODIFY
|
||||||
|
// event was sent after the DELETE. This ignores that MODIFY and
|
||||||
|
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||||
|
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
||||||
|
_, statErr := os.Lstat(e.Name)
|
||||||
|
return os.IsNotExist(statErr)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEvent returns an platform-independent Event based on an inotify mask.
|
||||||
|
func newEvent(name string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||||
|
e.Op |= Create
|
||||||
|
}
|
||||||
|
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
187
vendor/github.com/fsnotify/fsnotify/inotify_poller.go
generated
vendored
Normal file
187
vendor/github.com/fsnotify/fsnotify/inotify_poller.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
type fdPoller struct {
|
||||||
|
fd int // File descriptor (as returned by the inotify_init() syscall)
|
||||||
|
epfd int // Epoll file descriptor
|
||||||
|
pipe [2]int // Pipe for waking up
|
||||||
|
}
|
||||||
|
|
||||||
|
func emptyPoller(fd int) *fdPoller {
|
||||||
|
poller := new(fdPoller)
|
||||||
|
poller.fd = fd
|
||||||
|
poller.epfd = -1
|
||||||
|
poller.pipe[0] = -1
|
||||||
|
poller.pipe[1] = -1
|
||||||
|
return poller
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new inotify poller.
|
||||||
|
// This creates an inotify handler, and an epoll handler.
|
||||||
|
func newFdPoller(fd int) (*fdPoller, error) {
|
||||||
|
var errno error
|
||||||
|
poller := emptyPoller(fd)
|
||||||
|
defer func() {
|
||||||
|
if errno != nil {
|
||||||
|
poller.close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
poller.fd = fd
|
||||||
|
|
||||||
|
// Create epoll fd
|
||||||
|
poller.epfd, errno = unix.EpollCreate1(0)
|
||||||
|
if poller.epfd == -1 {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
|
||||||
|
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
|
||||||
|
if errno != nil {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register inotify fd with epoll
|
||||||
|
event := unix.EpollEvent{
|
||||||
|
Fd: int32(poller.fd),
|
||||||
|
Events: unix.EPOLLIN,
|
||||||
|
}
|
||||||
|
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
|
||||||
|
if errno != nil {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register pipe fd with epoll
|
||||||
|
event = unix.EpollEvent{
|
||||||
|
Fd: int32(poller.pipe[0]),
|
||||||
|
Events: unix.EPOLLIN,
|
||||||
|
}
|
||||||
|
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
|
||||||
|
if errno != nil {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
|
||||||
|
return poller, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait using epoll.
|
||||||
|
// Returns true if something is ready to be read,
|
||||||
|
// false if there is not.
|
||||||
|
func (poller *fdPoller) wait() (bool, error) {
|
||||||
|
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
|
||||||
|
// I don't know whether epoll_wait returns the number of events returned,
|
||||||
|
// or the total number of events ready.
|
||||||
|
// I decided to catch both by making the buffer one larger than the maximum.
|
||||||
|
events := make([]unix.EpollEvent, 7)
|
||||||
|
for {
|
||||||
|
n, errno := unix.EpollWait(poller.epfd, events, -1)
|
||||||
|
if n == -1 {
|
||||||
|
if errno == unix.EINTR {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return false, errno
|
||||||
|
}
|
||||||
|
if n == 0 {
|
||||||
|
// If there are no events, try again.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if n > 6 {
|
||||||
|
// This should never happen. More events were returned than should be possible.
|
||||||
|
return false, errors.New("epoll_wait returned more events than I know what to do with")
|
||||||
|
}
|
||||||
|
ready := events[:n]
|
||||||
|
epollhup := false
|
||||||
|
epollerr := false
|
||||||
|
epollin := false
|
||||||
|
for _, event := range ready {
|
||||||
|
if event.Fd == int32(poller.fd) {
|
||||||
|
if event.Events&unix.EPOLLHUP != 0 {
|
||||||
|
// This should not happen, but if it does, treat it as a wakeup.
|
||||||
|
epollhup = true
|
||||||
|
}
|
||||||
|
if event.Events&unix.EPOLLERR != 0 {
|
||||||
|
// If an error is waiting on the file descriptor, we should pretend
|
||||||
|
// something is ready to read, and let unix.Read pick up the error.
|
||||||
|
epollerr = true
|
||||||
|
}
|
||||||
|
if event.Events&unix.EPOLLIN != 0 {
|
||||||
|
// There is data to read.
|
||||||
|
epollin = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if event.Fd == int32(poller.pipe[0]) {
|
||||||
|
if event.Events&unix.EPOLLHUP != 0 {
|
||||||
|
// Write pipe descriptor was closed, by us. This means we're closing down the
|
||||||
|
// watcher, and we should wake up.
|
||||||
|
}
|
||||||
|
if event.Events&unix.EPOLLERR != 0 {
|
||||||
|
// If an error is waiting on the pipe file descriptor.
|
||||||
|
// This is an absolute mystery, and should never ever happen.
|
||||||
|
return false, errors.New("Error on the pipe descriptor.")
|
||||||
|
}
|
||||||
|
if event.Events&unix.EPOLLIN != 0 {
|
||||||
|
// This is a regular wakeup, so we have to clear the buffer.
|
||||||
|
err := poller.clearWake()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if epollhup || epollerr || epollin {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close the write end of the poller.
|
||||||
|
func (poller *fdPoller) wake() error {
|
||||||
|
buf := make([]byte, 1)
|
||||||
|
n, errno := unix.Write(poller.pipe[1], buf)
|
||||||
|
if n == -1 {
|
||||||
|
if errno == unix.EAGAIN {
|
||||||
|
// Buffer is full, poller will wake.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (poller *fdPoller) clearWake() error {
|
||||||
|
// You have to be woken up a LOT in order to get to 100!
|
||||||
|
buf := make([]byte, 100)
|
||||||
|
n, errno := unix.Read(poller.pipe[0], buf)
|
||||||
|
if n == -1 {
|
||||||
|
if errno == unix.EAGAIN {
|
||||||
|
// Buffer is empty, someone else cleared our wake.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close all poller file descriptors, but not the one passed to it.
|
||||||
|
func (poller *fdPoller) close() {
|
||||||
|
if poller.pipe[1] != -1 {
|
||||||
|
unix.Close(poller.pipe[1])
|
||||||
|
}
|
||||||
|
if poller.pipe[0] != -1 {
|
||||||
|
unix.Close(poller.pipe[0])
|
||||||
|
}
|
||||||
|
if poller.epfd != -1 {
|
||||||
|
unix.Close(poller.epfd)
|
||||||
|
}
|
||||||
|
}
|
521
vendor/github.com/fsnotify/fsnotify/kqueue.go
generated
vendored
Normal file
521
vendor/github.com/fsnotify/fsnotify/kqueue.go
generated
vendored
Normal file
@ -0,0 +1,521 @@
|
|||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build freebsd openbsd netbsd dragonfly darwin
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of files, delivering events to a channel.
|
||||||
|
type Watcher struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||||
|
|
||||||
|
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||||
|
|
||||||
|
mu sync.Mutex // Protects access to watcher data
|
||||||
|
watches map[string]int // Map of watched file descriptors (key: path).
|
||||||
|
externalWatches map[string]bool // Map of watches added by user of the library.
|
||||||
|
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
|
||||||
|
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
|
||||||
|
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
|
||||||
|
isClosed bool // Set to true when Close() is first called
|
||||||
|
}
|
||||||
|
|
||||||
|
type pathInfo struct {
|
||||||
|
name string
|
||||||
|
isDir bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
kq, err := kqueue()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
w := &Watcher{
|
||||||
|
kq: kq,
|
||||||
|
watches: make(map[string]int),
|
||||||
|
dirFlags: make(map[string]uint32),
|
||||||
|
paths: make(map[int]pathInfo),
|
||||||
|
fileExists: make(map[string]bool),
|
||||||
|
externalWatches: make(map[string]bool),
|
||||||
|
Events: make(chan Event),
|
||||||
|
Errors: make(chan error),
|
||||||
|
done: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes all watches and closes the events channel.
|
||||||
|
func (w *Watcher) Close() error {
|
||||||
|
w.mu.Lock()
|
||||||
|
if w.isClosed {
|
||||||
|
w.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
w.isClosed = true
|
||||||
|
|
||||||
|
// copy paths to remove while locked
|
||||||
|
var pathsToRemove = make([]string, 0, len(w.watches))
|
||||||
|
for name := range w.watches {
|
||||||
|
pathsToRemove = append(pathsToRemove, name)
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
// unlock before calling Remove, which also locks
|
||||||
|
|
||||||
|
for _, name := range pathsToRemove {
|
||||||
|
w.Remove(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// send a "quit" message to the reader goroutine
|
||||||
|
close(w.done)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add starts watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Add(name string) error {
|
||||||
|
w.mu.Lock()
|
||||||
|
w.externalWatches[name] = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
_, err := w.addWatch(name, noteAllEvents)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove stops watching the the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Remove(name string) error {
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
w.mu.Lock()
|
||||||
|
watchfd, ok := w.watches[name]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
const registerRemove = unix.EV_DELETE
|
||||||
|
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
unix.Close(watchfd)
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
isDir := w.paths[watchfd].isDir
|
||||||
|
delete(w.watches, name)
|
||||||
|
delete(w.paths, watchfd)
|
||||||
|
delete(w.dirFlags, name)
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
// Find all watched paths that are in this directory that are not external.
|
||||||
|
if isDir {
|
||||||
|
var pathsToRemove []string
|
||||||
|
w.mu.Lock()
|
||||||
|
for _, path := range w.paths {
|
||||||
|
wdir, _ := filepath.Split(path.name)
|
||||||
|
if filepath.Clean(wdir) == name {
|
||||||
|
if !w.externalWatches[path.name] {
|
||||||
|
pathsToRemove = append(pathsToRemove, path.name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
for _, name := range pathsToRemove {
|
||||||
|
// Since these are internal, not much sense in propagating error
|
||||||
|
// to the user, as that will just confuse them with an error about
|
||||||
|
// a path they did not explicitly watch themselves.
|
||||||
|
w.Remove(name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||||
|
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||||
|
|
||||||
|
// keventWaitTime to block on each read from kevent
|
||||||
|
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
|
||||||
|
|
||||||
|
// addWatch adds name to the watched file set.
|
||||||
|
// The flags are interpreted as described in kevent(2).
|
||||||
|
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
||||||
|
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||||
|
var isDir bool
|
||||||
|
// Make ./name and name equivalent
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
if w.isClosed {
|
||||||
|
w.mu.Unlock()
|
||||||
|
return "", errors.New("kevent instance already closed")
|
||||||
|
}
|
||||||
|
watchfd, alreadyWatching := w.watches[name]
|
||||||
|
// We already have a watch, but we can still override flags.
|
||||||
|
if alreadyWatching {
|
||||||
|
isDir = w.paths[watchfd].isDir
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
if !alreadyWatching {
|
||||||
|
fi, err := os.Lstat(name)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't watch sockets.
|
||||||
|
if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't watch named pipes.
|
||||||
|
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Follow Symlinks
|
||||||
|
// Unfortunately, Linux can add bogus symlinks to watch list without
|
||||||
|
// issue, and Windows can't do symlinks period (AFAIK). To maintain
|
||||||
|
// consistency, we will act like everything is fine. There will simply
|
||||||
|
// be no file events for broken symlinks.
|
||||||
|
// Hence the returns of nil on errors.
|
||||||
|
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||||
|
name, err = filepath.EvalSymlinks(name)
|
||||||
|
if err != nil {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
_, alreadyWatching = w.watches[name]
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
if alreadyWatching {
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fi, err = os.Lstat(name)
|
||||||
|
if err != nil {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
watchfd, err = unix.Open(name, openMode, 0700)
|
||||||
|
if watchfd == -1 {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
isDir = fi.IsDir()
|
||||||
|
}
|
||||||
|
|
||||||
|
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
|
||||||
|
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
|
||||||
|
unix.Close(watchfd)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !alreadyWatching {
|
||||||
|
w.mu.Lock()
|
||||||
|
w.watches[name] = watchfd
|
||||||
|
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if isDir {
|
||||||
|
// Watch the directory if it has not been watched before,
|
||||||
|
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||||
|
w.mu.Lock()
|
||||||
|
|
||||||
|
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||||
|
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||||
|
// Store flags so this watch can be updated later
|
||||||
|
w.dirFlags[name] = flags
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
if watchDir {
|
||||||
|
if err := w.watchDirectoryFiles(name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents reads from kqueue and converts the received kevents into
|
||||||
|
// Event values that it sends down the Events channel.
|
||||||
|
func (w *Watcher) readEvents() {
|
||||||
|
eventBuffer := make([]unix.Kevent_t, 10)
|
||||||
|
|
||||||
|
loop:
|
||||||
|
for {
|
||||||
|
// See if there is a message on the "done" channel
|
||||||
|
select {
|
||||||
|
case <-w.done:
|
||||||
|
break loop
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get new events
|
||||||
|
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
|
||||||
|
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||||
|
if err != nil && err != unix.EINTR {
|
||||||
|
select {
|
||||||
|
case w.Errors <- err:
|
||||||
|
case <-w.done:
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush the events we received to the Events channel
|
||||||
|
for len(kevents) > 0 {
|
||||||
|
kevent := &kevents[0]
|
||||||
|
watchfd := int(kevent.Ident)
|
||||||
|
mask := uint32(kevent.Fflags)
|
||||||
|
w.mu.Lock()
|
||||||
|
path := w.paths[watchfd]
|
||||||
|
w.mu.Unlock()
|
||||||
|
event := newEvent(path.name, mask)
|
||||||
|
|
||||||
|
if path.isDir && !(event.Op&Remove == Remove) {
|
||||||
|
// Double check to make sure the directory exists. This can happen when
|
||||||
|
// we do a rm -fr on a recursively watched folders and we receive a
|
||||||
|
// modification event first but the folder has been deleted and later
|
||||||
|
// receive the delete event
|
||||||
|
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||||
|
// mark is as delete event
|
||||||
|
event.Op |= Remove
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
||||||
|
w.Remove(event.Name)
|
||||||
|
w.mu.Lock()
|
||||||
|
delete(w.fileExists, event.Name)
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||||
|
w.sendDirectoryChangeEvents(event.Name)
|
||||||
|
} else {
|
||||||
|
// Send the event on the Events channel.
|
||||||
|
select {
|
||||||
|
case w.Events <- event:
|
||||||
|
case <-w.done:
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if event.Op&Remove == Remove {
|
||||||
|
// Look for a file that may have overwritten this.
|
||||||
|
// For example, mv f1 f2 will delete f2, then create f2.
|
||||||
|
if path.isDir {
|
||||||
|
fileDir := filepath.Clean(event.Name)
|
||||||
|
w.mu.Lock()
|
||||||
|
_, found := w.watches[fileDir]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if found {
|
||||||
|
// make sure the directory exists before we watch for changes. When we
|
||||||
|
// do a recursive watch and perform rm -fr, the parent directory might
|
||||||
|
// have gone missing, ignore the missing directory and let the
|
||||||
|
// upcoming delete event remove the watch from the parent directory.
|
||||||
|
if _, err := os.Lstat(fileDir); err == nil {
|
||||||
|
w.sendDirectoryChangeEvents(fileDir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
filePath := filepath.Clean(event.Name)
|
||||||
|
if fileInfo, err := os.Lstat(filePath); err == nil {
|
||||||
|
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to next event
|
||||||
|
kevents = kevents[1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// cleanup
|
||||||
|
err := unix.Close(w.kq)
|
||||||
|
if err != nil {
|
||||||
|
// only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
|
||||||
|
select {
|
||||||
|
case w.Errors <- err:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
close(w.Events)
|
||||||
|
close(w.Errors)
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEvent returns an platform-independent Event based on kqueue Fflags.
|
||||||
|
func newEvent(name string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCreateEvent(name string) Event {
|
||||||
|
return Event{Name: name, Op: Create}
|
||||||
|
}
|
||||||
|
|
||||||
|
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||||
|
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||||
|
// Get all files
|
||||||
|
files, err := ioutil.ReadDir(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fileInfo := range files {
|
||||||
|
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||||
|
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
w.fileExists[filePath] = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendDirectoryEvents searches the directory for newly created files
|
||||||
|
// and sends them over the event channel. This functionality is to have
|
||||||
|
// the BSD version of fsnotify match Linux inotify which provides a
|
||||||
|
// create event for files created in a watched directory.
|
||||||
|
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||||
|
// Get all files
|
||||||
|
files, err := ioutil.ReadDir(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
select {
|
||||||
|
case w.Errors <- err:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search for new files
|
||||||
|
for _, fileInfo := range files {
|
||||||
|
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||||
|
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
|
||||||
|
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
||||||
|
w.mu.Lock()
|
||||||
|
_, doesExist := w.fileExists[filePath]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if !doesExist {
|
||||||
|
// Send create event
|
||||||
|
select {
|
||||||
|
case w.Events <- newCreateEvent(filePath):
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||||
|
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
w.fileExists[filePath] = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
||||||
|
if fileInfo.IsDir() {
|
||||||
|
// mimic Linux providing delete events for subdirectories
|
||||||
|
// but preserve the flags used if currently watching subdirectory
|
||||||
|
w.mu.Lock()
|
||||||
|
flags := w.dirFlags[name]
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
|
||||||
|
return w.addWatch(name, flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// watch file to mimic Linux inotify
|
||||||
|
return w.addWatch(name, noteAllEvents)
|
||||||
|
}
|
||||||
|
|
||||||
|
// kqueue creates a new kernel event queue and returns a descriptor.
|
||||||
|
func kqueue() (kq int, err error) {
|
||||||
|
kq, err = unix.Kqueue()
|
||||||
|
if kq == -1 {
|
||||||
|
return kq, err
|
||||||
|
}
|
||||||
|
return kq, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// register events with the queue
|
||||||
|
func register(kq int, fds []int, flags int, fflags uint32) error {
|
||||||
|
changes := make([]unix.Kevent_t, len(fds))
|
||||||
|
|
||||||
|
for i, fd := range fds {
|
||||||
|
// SetKevent converts int to the platform-specific types:
|
||||||
|
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||||
|
changes[i].Fflags = fflags
|
||||||
|
}
|
||||||
|
|
||||||
|
// register the events
|
||||||
|
success, err := unix.Kevent(kq, changes, nil, nil)
|
||||||
|
if success == -1 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// read retrieves pending events, or waits until an event occurs.
|
||||||
|
// A timeout of nil blocks indefinitely, while 0 polls the queue.
|
||||||
|
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
|
||||||
|
n, err := unix.Kevent(kq, nil, events, timeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return events[0:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// durationToTimespec prepares a timeout value
|
||||||
|
func durationToTimespec(d time.Duration) unix.Timespec {
|
||||||
|
return unix.NsecToTimespec(d.Nanoseconds())
|
||||||
|
}
|
11
vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
generated
vendored
Normal file
11
vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build freebsd openbsd netbsd dragonfly
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
const openMode = unix.O_NONBLOCK | unix.O_RDONLY
|
12
vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
generated
vendored
Normal file
12
vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build darwin
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
// note: this constant is not defined on BSD
|
||||||
|
const openMode = unix.O_EVTONLY
|
561
vendor/github.com/fsnotify/fsnotify/windows.go
generated
vendored
Normal file
561
vendor/github.com/fsnotify/fsnotify/windows.go
generated
vendored
Normal file
@ -0,0 +1,561 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of files, delivering events to a channel.
|
||||||
|
type Watcher struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
isClosed bool // Set to true when Close() is first called
|
||||||
|
mu sync.Mutex // Map access
|
||||||
|
port syscall.Handle // Handle to completion port
|
||||||
|
watches watchMap // Map of watches (key: i-number)
|
||||||
|
input chan *input // Inputs to the reader are sent on this channel
|
||||||
|
quit chan chan<- error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||||
|
if e != nil {
|
||||||
|
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||||
|
}
|
||||||
|
w := &Watcher{
|
||||||
|
port: port,
|
||||||
|
watches: make(watchMap),
|
||||||
|
input: make(chan *input, 1),
|
||||||
|
Events: make(chan Event, 50),
|
||||||
|
Errors: make(chan error),
|
||||||
|
quit: make(chan chan<- error, 1),
|
||||||
|
}
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes all watches and closes the events channel.
|
||||||
|
func (w *Watcher) Close() error {
|
||||||
|
if w.isClosed {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
w.isClosed = true
|
||||||
|
|
||||||
|
// Send "quit" message to the reader goroutine
|
||||||
|
ch := make(chan error)
|
||||||
|
w.quit <- ch
|
||||||
|
if err := w.wakeupReader(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return <-ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add starts watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Add(name string) error {
|
||||||
|
if w.isClosed {
|
||||||
|
return errors.New("watcher already closed")
|
||||||
|
}
|
||||||
|
in := &input{
|
||||||
|
op: opAddWatch,
|
||||||
|
path: filepath.Clean(name),
|
||||||
|
flags: sysFSALLEVENTS,
|
||||||
|
reply: make(chan error),
|
||||||
|
}
|
||||||
|
w.input <- in
|
||||||
|
if err := w.wakeupReader(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return <-in.reply
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove stops watching the the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Remove(name string) error {
|
||||||
|
in := &input{
|
||||||
|
op: opRemoveWatch,
|
||||||
|
path: filepath.Clean(name),
|
||||||
|
reply: make(chan error),
|
||||||
|
}
|
||||||
|
w.input <- in
|
||||||
|
if err := w.wakeupReader(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return <-in.reply
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Options for AddWatch
|
||||||
|
sysFSONESHOT = 0x80000000
|
||||||
|
sysFSONLYDIR = 0x1000000
|
||||||
|
|
||||||
|
// Events
|
||||||
|
sysFSACCESS = 0x1
|
||||||
|
sysFSALLEVENTS = 0xfff
|
||||||
|
sysFSATTRIB = 0x4
|
||||||
|
sysFSCLOSE = 0x18
|
||||||
|
sysFSCREATE = 0x100
|
||||||
|
sysFSDELETE = 0x200
|
||||||
|
sysFSDELETESELF = 0x400
|
||||||
|
sysFSMODIFY = 0x2
|
||||||
|
sysFSMOVE = 0xc0
|
||||||
|
sysFSMOVEDFROM = 0x40
|
||||||
|
sysFSMOVEDTO = 0x80
|
||||||
|
sysFSMOVESELF = 0x800
|
||||||
|
|
||||||
|
// Special events
|
||||||
|
sysFSIGNORED = 0x8000
|
||||||
|
sysFSQOVERFLOW = 0x4000
|
||||||
|
)
|
||||||
|
|
||||||
|
func newEvent(name string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||||
|
e.Op |= Create
|
||||||
|
}
|
||||||
|
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
opAddWatch = iota
|
||||||
|
opRemoveWatch
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
provisional uint64 = 1 << (32 + iota)
|
||||||
|
)
|
||||||
|
|
||||||
|
type input struct {
|
||||||
|
op int
|
||||||
|
path string
|
||||||
|
flags uint32
|
||||||
|
reply chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
type inode struct {
|
||||||
|
handle syscall.Handle
|
||||||
|
volume uint32
|
||||||
|
index uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type watch struct {
|
||||||
|
ov syscall.Overlapped
|
||||||
|
ino *inode // i-number
|
||||||
|
path string // Directory path
|
||||||
|
mask uint64 // Directory itself is being watched with these notify flags
|
||||||
|
names map[string]uint64 // Map of names being watched and their notify flags
|
||||||
|
rename string // Remembers the old name while renaming a file
|
||||||
|
buf [4096]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type indexMap map[uint64]*watch
|
||||||
|
type watchMap map[uint32]indexMap
|
||||||
|
|
||||||
|
func (w *Watcher) wakeupReader() error {
|
||||||
|
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||||
|
if e != nil {
|
||||||
|
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDir(pathname string) (dir string, err error) {
|
||||||
|
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||||
|
if e != nil {
|
||||||
|
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||||
|
}
|
||||||
|
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||||
|
dir = pathname
|
||||||
|
} else {
|
||||||
|
dir, _ = filepath.Split(pathname)
|
||||||
|
dir = filepath.Clean(dir)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func getIno(path string) (ino *inode, err error) {
|
||||||
|
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
|
||||||
|
syscall.FILE_LIST_DIRECTORY,
|
||||||
|
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
|
||||||
|
nil, syscall.OPEN_EXISTING,
|
||||||
|
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||||
|
if e != nil {
|
||||||
|
return nil, os.NewSyscallError("CreateFile", e)
|
||||||
|
}
|
||||||
|
var fi syscall.ByHandleFileInformation
|
||||||
|
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
||||||
|
syscall.CloseHandle(h)
|
||||||
|
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
||||||
|
}
|
||||||
|
ino = &inode{
|
||||||
|
handle: h,
|
||||||
|
volume: fi.VolumeSerialNumber,
|
||||||
|
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||||
|
}
|
||||||
|
return ino, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (m watchMap) get(ino *inode) *watch {
|
||||||
|
if i := m[ino.volume]; i != nil {
|
||||||
|
return i[ino.index]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (m watchMap) set(ino *inode, watch *watch) {
|
||||||
|
i := m[ino.volume]
|
||||||
|
if i == nil {
|
||||||
|
i = make(indexMap)
|
||||||
|
m[ino.volume] = i
|
||||||
|
}
|
||||||
|
i[ino.index] = watch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||||
|
dir, err := getDir(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if flags&sysFSONLYDIR != 0 && pathname != dir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ino, err := getIno(dir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
watchEntry := w.watches.get(ino)
|
||||||
|
w.mu.Unlock()
|
||||||
|
if watchEntry == nil {
|
||||||
|
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
||||||
|
syscall.CloseHandle(ino.handle)
|
||||||
|
return os.NewSyscallError("CreateIoCompletionPort", e)
|
||||||
|
}
|
||||||
|
watchEntry = &watch{
|
||||||
|
ino: ino,
|
||||||
|
path: dir,
|
||||||
|
names: make(map[string]uint64),
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
w.watches.set(ino, watchEntry)
|
||||||
|
w.mu.Unlock()
|
||||||
|
flags |= provisional
|
||||||
|
} else {
|
||||||
|
syscall.CloseHandle(ino.handle)
|
||||||
|
}
|
||||||
|
if pathname == dir {
|
||||||
|
watchEntry.mask |= flags
|
||||||
|
} else {
|
||||||
|
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||||
|
}
|
||||||
|
if err = w.startRead(watchEntry); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if pathname == dir {
|
||||||
|
watchEntry.mask &= ^provisional
|
||||||
|
} else {
|
||||||
|
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) remWatch(pathname string) error {
|
||||||
|
dir, err := getDir(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ino, err := getIno(dir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
watch := w.watches.get(ino)
|
||||||
|
w.mu.Unlock()
|
||||||
|
if watch == nil {
|
||||||
|
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
||||||
|
}
|
||||||
|
if pathname == dir {
|
||||||
|
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||||
|
watch.mask = 0
|
||||||
|
} else {
|
||||||
|
name := filepath.Base(pathname)
|
||||||
|
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
return w.startRead(watch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) deleteWatch(watch *watch) {
|
||||||
|
for name, mask := range watch.names {
|
||||||
|
if mask&provisional == 0 {
|
||||||
|
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
|
||||||
|
}
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
if watch.mask != 0 {
|
||||||
|
if watch.mask&provisional == 0 {
|
||||||
|
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||||
|
}
|
||||||
|
watch.mask = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) startRead(watch *watch) error {
|
||||||
|
if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
||||||
|
w.Errors <- os.NewSyscallError("CancelIo", e)
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
}
|
||||||
|
mask := toWindowsFlags(watch.mask)
|
||||||
|
for _, m := range watch.names {
|
||||||
|
mask |= toWindowsFlags(m)
|
||||||
|
}
|
||||||
|
if mask == 0 {
|
||||||
|
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
||||||
|
w.Errors <- os.NewSyscallError("CloseHandle", e)
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||||
|
w.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||||
|
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||||
|
if e != nil {
|
||||||
|
err := os.NewSyscallError("ReadDirectoryChanges", e)
|
||||||
|
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||||
|
// Watched directory was probably removed
|
||||||
|
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
|
||||||
|
if watch.mask&sysFSONESHOT != 0 {
|
||||||
|
watch.mask = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
w.startRead(watch)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents reads from the I/O completion port, converts the
|
||||||
|
// received events into Event objects and sends them via the Events channel.
|
||||||
|
// Entry point to the I/O thread.
|
||||||
|
func (w *Watcher) readEvents() {
|
||||||
|
var (
|
||||||
|
n, key uint32
|
||||||
|
ov *syscall.Overlapped
|
||||||
|
)
|
||||||
|
runtime.LockOSThread()
|
||||||
|
|
||||||
|
for {
|
||||||
|
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||||
|
watch := (*watch)(unsafe.Pointer(ov))
|
||||||
|
|
||||||
|
if watch == nil {
|
||||||
|
select {
|
||||||
|
case ch := <-w.quit:
|
||||||
|
w.mu.Lock()
|
||||||
|
var indexes []indexMap
|
||||||
|
for _, index := range w.watches {
|
||||||
|
indexes = append(indexes, index)
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
for _, index := range indexes {
|
||||||
|
for _, watch := range index {
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
w.startRead(watch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if e := syscall.CloseHandle(w.port); e != nil {
|
||||||
|
err = os.NewSyscallError("CloseHandle", e)
|
||||||
|
}
|
||||||
|
close(w.Events)
|
||||||
|
close(w.Errors)
|
||||||
|
ch <- err
|
||||||
|
return
|
||||||
|
case in := <-w.input:
|
||||||
|
switch in.op {
|
||||||
|
case opAddWatch:
|
||||||
|
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||||
|
case opRemoveWatch:
|
||||||
|
in.reply <- w.remWatch(in.path)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch e {
|
||||||
|
case syscall.ERROR_MORE_DATA:
|
||||||
|
if watch == nil {
|
||||||
|
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||||
|
} else {
|
||||||
|
// The i/o succeeded but the buffer is full.
|
||||||
|
// In theory we should be building up a full packet.
|
||||||
|
// In practice we can get away with just carrying on.
|
||||||
|
n = uint32(unsafe.Sizeof(watch.buf))
|
||||||
|
}
|
||||||
|
case syscall.ERROR_ACCESS_DENIED:
|
||||||
|
// Watched directory was probably removed
|
||||||
|
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
w.startRead(watch)
|
||||||
|
continue
|
||||||
|
case syscall.ERROR_OPERATION_ABORTED:
|
||||||
|
// CancelIo was called on this handle
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||||
|
continue
|
||||||
|
case nil:
|
||||||
|
}
|
||||||
|
|
||||||
|
var offset uint32
|
||||||
|
for {
|
||||||
|
if n == 0 {
|
||||||
|
w.Events <- newEvent("", sysFSQOVERFLOW)
|
||||||
|
w.Errors <- errors.New("short read in readEvents()")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Point "raw" to the event in the buffer
|
||||||
|
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||||
|
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||||
|
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||||
|
fullname := filepath.Join(watch.path, name)
|
||||||
|
|
||||||
|
var mask uint64
|
||||||
|
switch raw.Action {
|
||||||
|
case syscall.FILE_ACTION_REMOVED:
|
||||||
|
mask = sysFSDELETESELF
|
||||||
|
case syscall.FILE_ACTION_MODIFIED:
|
||||||
|
mask = sysFSMODIFY
|
||||||
|
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||||
|
watch.rename = name
|
||||||
|
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||||
|
if watch.names[watch.rename] != 0 {
|
||||||
|
watch.names[name] |= watch.names[watch.rename]
|
||||||
|
delete(watch.names, watch.rename)
|
||||||
|
mask = sysFSMOVESELF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sendNameEvent := func() {
|
||||||
|
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||||
|
if watch.names[name]&sysFSONESHOT != 0 {
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||||
|
sendNameEvent()
|
||||||
|
}
|
||||||
|
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||||
|
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||||
|
if watch.mask&sysFSONESHOT != 0 {
|
||||||
|
watch.mask = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||||
|
fullname = filepath.Join(watch.path, watch.rename)
|
||||||
|
sendNameEvent()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to the next event in the buffer
|
||||||
|
if raw.NextEntryOffset == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
offset += raw.NextEntryOffset
|
||||||
|
|
||||||
|
// Error!
|
||||||
|
if offset >= n {
|
||||||
|
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.startRead(watch); err != nil {
|
||||||
|
w.Errors <- err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||||
|
if mask == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
event := newEvent(name, uint32(mask))
|
||||||
|
select {
|
||||||
|
case ch := <-w.quit:
|
||||||
|
w.quit <- ch
|
||||||
|
case w.Events <- event:
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func toWindowsFlags(mask uint64) uint32 {
|
||||||
|
var m uint32
|
||||||
|
if mask&sysFSACCESS != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||||
|
}
|
||||||
|
if mask&sysFSMODIFY != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||||
|
}
|
||||||
|
if mask&sysFSATTRIB != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||||
|
}
|
||||||
|
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func toFSnotifyFlags(action uint32) uint64 {
|
||||||
|
switch action {
|
||||||
|
case syscall.FILE_ACTION_ADDED:
|
||||||
|
return sysFSCREATE
|
||||||
|
case syscall.FILE_ACTION_REMOVED:
|
||||||
|
return sysFSDELETE
|
||||||
|
case syscall.FILE_ACTION_MODIFIED:
|
||||||
|
return sysFSMODIFY
|
||||||
|
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||||
|
return sysFSMOVEDFROM
|
||||||
|
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||||
|
return sysFSMOVEDTO
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
354
vendor/github.com/hashicorp/hcl/LICENSE
generated
vendored
Normal file
354
vendor/github.com/hashicorp/hcl/LICENSE
generated
vendored
Normal file
@ -0,0 +1,354 @@
|
|||||||
|
Mozilla Public License, version 2.0
|
||||||
|
|
||||||
|
1. Definitions
|
||||||
|
|
||||||
|
1.1. “Contributor”
|
||||||
|
|
||||||
|
means each individual or legal entity that creates, contributes to the
|
||||||
|
creation of, or owns Covered Software.
|
||||||
|
|
||||||
|
1.2. “Contributor Version”
|
||||||
|
|
||||||
|
means the combination of the Contributions of others (if any) used by a
|
||||||
|
Contributor and that particular Contributor’s Contribution.
|
||||||
|
|
||||||
|
1.3. “Contribution”
|
||||||
|
|
||||||
|
means Covered Software of a particular Contributor.
|
||||||
|
|
||||||
|
1.4. “Covered Software”
|
||||||
|
|
||||||
|
means Source Code Form to which the initial Contributor has attached the
|
||||||
|
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||||
|
Modifications of such Source Code Form, in each case including portions
|
||||||
|
thereof.
|
||||||
|
|
||||||
|
1.5. “Incompatible With Secondary Licenses”
|
||||||
|
means
|
||||||
|
|
||||||
|
a. that the initial Contributor has attached the notice described in
|
||||||
|
Exhibit B to the Covered Software; or
|
||||||
|
|
||||||
|
b. that the Covered Software was made available under the terms of version
|
||||||
|
1.1 or earlier of the License, but not also under the terms of a
|
||||||
|
Secondary License.
|
||||||
|
|
||||||
|
1.6. “Executable Form”
|
||||||
|
|
||||||
|
means any form of the work other than Source Code Form.
|
||||||
|
|
||||||
|
1.7. “Larger Work”
|
||||||
|
|
||||||
|
means a work that combines Covered Software with other material, in a separate
|
||||||
|
file or files, that is not Covered Software.
|
||||||
|
|
||||||
|
1.8. “License”
|
||||||
|
|
||||||
|
means this document.
|
||||||
|
|
||||||
|
1.9. “Licensable”
|
||||||
|
|
||||||
|
means having the right to grant, to the maximum extent possible, whether at the
|
||||||
|
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||||
|
this License.
|
||||||
|
|
||||||
|
1.10. “Modifications”
|
||||||
|
|
||||||
|
means any of the following:
|
||||||
|
|
||||||
|
a. any file in Source Code Form that results from an addition to, deletion
|
||||||
|
from, or modification of the contents of Covered Software; or
|
||||||
|
|
||||||
|
b. any new file in Source Code Form that contains any Covered Software.
|
||||||
|
|
||||||
|
1.11. “Patent Claims” of a Contributor
|
||||||
|
|
||||||
|
means any patent claim(s), including without limitation, method, process,
|
||||||
|
and apparatus claims, in any patent Licensable by such Contributor that
|
||||||
|
would be infringed, but for the grant of the License, by the making,
|
||||||
|
using, selling, offering for sale, having made, import, or transfer of
|
||||||
|
either its Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
1.12. “Secondary License”
|
||||||
|
|
||||||
|
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||||
|
General Public License, Version 2.1, the GNU Affero General Public
|
||||||
|
License, Version 3.0, or any later versions of those licenses.
|
||||||
|
|
||||||
|
1.13. “Source Code Form”
|
||||||
|
|
||||||
|
means the form of the work preferred for making modifications.
|
||||||
|
|
||||||
|
1.14. “You” (or “Your”)
|
||||||
|
|
||||||
|
means an individual or a legal entity exercising rights under this
|
||||||
|
License. For legal entities, “You” includes any entity that controls, is
|
||||||
|
controlled by, or is under common control with You. For purposes of this
|
||||||
|
definition, “control” means (a) the power, direct or indirect, to cause
|
||||||
|
the direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||||
|
outstanding shares or beneficial ownership of such entity.
|
||||||
|
|
||||||
|
|
||||||
|
2. License Grants and Conditions
|
||||||
|
|
||||||
|
2.1. Grants
|
||||||
|
|
||||||
|
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||||
|
non-exclusive license:
|
||||||
|
|
||||||
|
a. under intellectual property rights (other than patent or trademark)
|
||||||
|
Licensable by such Contributor to use, reproduce, make available,
|
||||||
|
modify, display, perform, distribute, and otherwise exploit its
|
||||||
|
Contributions, either on an unmodified basis, with Modifications, or as
|
||||||
|
part of a Larger Work; and
|
||||||
|
|
||||||
|
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||||
|
sale, have made, import, and otherwise transfer either its Contributions
|
||||||
|
or its Contributor Version.
|
||||||
|
|
||||||
|
2.2. Effective Date
|
||||||
|
|
||||||
|
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||||
|
effective for each Contribution on the date the Contributor first distributes
|
||||||
|
such Contribution.
|
||||||
|
|
||||||
|
2.3. Limitations on Grant Scope
|
||||||
|
|
||||||
|
The licenses granted in this Section 2 are the only rights granted under this
|
||||||
|
License. No additional rights or licenses will be implied from the distribution
|
||||||
|
or licensing of Covered Software under this License. Notwithstanding Section
|
||||||
|
2.1(b) above, no patent license is granted by a Contributor:
|
||||||
|
|
||||||
|
a. for any code that a Contributor has removed from Covered Software; or
|
||||||
|
|
||||||
|
b. for infringements caused by: (i) Your and any other third party’s
|
||||||
|
modifications of Covered Software, or (ii) the combination of its
|
||||||
|
Contributions with other software (except as part of its Contributor
|
||||||
|
Version); or
|
||||||
|
|
||||||
|
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||||
|
Contributions.
|
||||||
|
|
||||||
|
This License does not grant any rights in the trademarks, service marks, or
|
||||||
|
logos of any Contributor (except as may be necessary to comply with the
|
||||||
|
notice requirements in Section 3.4).
|
||||||
|
|
||||||
|
2.4. Subsequent Licenses
|
||||||
|
|
||||||
|
No Contributor makes additional grants as a result of Your choice to
|
||||||
|
distribute the Covered Software under a subsequent version of this License
|
||||||
|
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||||
|
under the terms of Section 3.3).
|
||||||
|
|
||||||
|
2.5. Representation
|
||||||
|
|
||||||
|
Each Contributor represents that the Contributor believes its Contributions
|
||||||
|
are its original creation(s) or it has sufficient rights to grant the
|
||||||
|
rights to its Contributions conveyed by this License.
|
||||||
|
|
||||||
|
2.6. Fair Use
|
||||||
|
|
||||||
|
This License is not intended to limit any rights You have under applicable
|
||||||
|
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||||
|
|
||||||
|
2.7. Conditions
|
||||||
|
|
||||||
|
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||||
|
Section 2.1.
|
||||||
|
|
||||||
|
|
||||||
|
3. Responsibilities
|
||||||
|
|
||||||
|
3.1. Distribution of Source Form
|
||||||
|
|
||||||
|
All distribution of Covered Software in Source Code Form, including any
|
||||||
|
Modifications that You create or to which You contribute, must be under the
|
||||||
|
terms of this License. You must inform recipients that the Source Code Form
|
||||||
|
of the Covered Software is governed by the terms of this License, and how
|
||||||
|
they can obtain a copy of this License. You may not attempt to alter or
|
||||||
|
restrict the recipients’ rights in the Source Code Form.
|
||||||
|
|
||||||
|
3.2. Distribution of Executable Form
|
||||||
|
|
||||||
|
If You distribute Covered Software in Executable Form then:
|
||||||
|
|
||||||
|
a. such Covered Software must also be made available in Source Code Form,
|
||||||
|
as described in Section 3.1, and You must inform recipients of the
|
||||||
|
Executable Form how they can obtain a copy of such Source Code Form by
|
||||||
|
reasonable means in a timely manner, at a charge no more than the cost
|
||||||
|
of distribution to the recipient; and
|
||||||
|
|
||||||
|
b. You may distribute such Executable Form under the terms of this License,
|
||||||
|
or sublicense it under different terms, provided that the license for
|
||||||
|
the Executable Form does not attempt to limit or alter the recipients’
|
||||||
|
rights in the Source Code Form under this License.
|
||||||
|
|
||||||
|
3.3. Distribution of a Larger Work
|
||||||
|
|
||||||
|
You may create and distribute a Larger Work under terms of Your choice,
|
||||||
|
provided that You also comply with the requirements of this License for the
|
||||||
|
Covered Software. If the Larger Work is a combination of Covered Software
|
||||||
|
with a work governed by one or more Secondary Licenses, and the Covered
|
||||||
|
Software is not Incompatible With Secondary Licenses, this License permits
|
||||||
|
You to additionally distribute such Covered Software under the terms of
|
||||||
|
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||||
|
their option, further distribute the Covered Software under the terms of
|
||||||
|
either this License or such Secondary License(s).
|
||||||
|
|
||||||
|
3.4. Notices
|
||||||
|
|
||||||
|
You may not remove or alter the substance of any license notices (including
|
||||||
|
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||||
|
of liability) contained within the Source Code Form of the Covered
|
||||||
|
Software, except that You may alter any license notices to the extent
|
||||||
|
required to remedy known factual inaccuracies.
|
||||||
|
|
||||||
|
3.5. Application of Additional Terms
|
||||||
|
|
||||||
|
You may choose to offer, and to charge a fee for, warranty, support,
|
||||||
|
indemnity or liability obligations to one or more recipients of Covered
|
||||||
|
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||||
|
of any Contributor. You must make it absolutely clear that any such
|
||||||
|
warranty, support, indemnity, or liability obligation is offered by You
|
||||||
|
alone, and You hereby agree to indemnify every Contributor for any
|
||||||
|
liability incurred by such Contributor as a result of warranty, support,
|
||||||
|
indemnity or liability terms You offer. You may include additional
|
||||||
|
disclaimers of warranty and limitations of liability specific to any
|
||||||
|
jurisdiction.
|
||||||
|
|
||||||
|
4. Inability to Comply Due to Statute or Regulation
|
||||||
|
|
||||||
|
If it is impossible for You to comply with any of the terms of this License
|
||||||
|
with respect to some or all of the Covered Software due to statute, judicial
|
||||||
|
order, or regulation then You must: (a) comply with the terms of this License
|
||||||
|
to the maximum extent possible; and (b) describe the limitations and the code
|
||||||
|
they affect. Such description must be placed in a text file included with all
|
||||||
|
distributions of the Covered Software under this License. Except to the
|
||||||
|
extent prohibited by statute or regulation, such description must be
|
||||||
|
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||||
|
understand it.
|
||||||
|
|
||||||
|
5. Termination
|
||||||
|
|
||||||
|
5.1. The rights granted under this License will terminate automatically if You
|
||||||
|
fail to comply with any of its terms. However, if You become compliant,
|
||||||
|
then the rights granted under this License from a particular Contributor
|
||||||
|
are reinstated (a) provisionally, unless and until such Contributor
|
||||||
|
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||||
|
if such Contributor fails to notify You of the non-compliance by some
|
||||||
|
reasonable means prior to 60 days after You have come back into compliance.
|
||||||
|
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||||
|
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||||
|
some reasonable means, this is the first time You have received notice of
|
||||||
|
non-compliance with this License from such Contributor, and You become
|
||||||
|
compliant prior to 30 days after Your receipt of the notice.
|
||||||
|
|
||||||
|
5.2. If You initiate litigation against any entity by asserting a patent
|
||||||
|
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||||
|
and cross-claims) alleging that a Contributor Version directly or
|
||||||
|
indirectly infringes any patent, then the rights granted to You by any and
|
||||||
|
all Contributors for the Covered Software under Section 2.1 of this License
|
||||||
|
shall terminate.
|
||||||
|
|
||||||
|
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||||
|
license agreements (excluding distributors and resellers) which have been
|
||||||
|
validly granted by You or Your distributors under this License prior to
|
||||||
|
termination shall survive termination.
|
||||||
|
|
||||||
|
6. Disclaimer of Warranty
|
||||||
|
|
||||||
|
Covered Software is provided under this License on an “as is” basis, without
|
||||||
|
warranty of any kind, either expressed, implied, or statutory, including,
|
||||||
|
without limitation, warranties that the Covered Software is free of defects,
|
||||||
|
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||||
|
risk as to the quality and performance of the Covered Software is with You.
|
||||||
|
Should any Covered Software prove defective in any respect, You (not any
|
||||||
|
Contributor) assume the cost of any necessary servicing, repair, or
|
||||||
|
correction. This disclaimer of warranty constitutes an essential part of this
|
||||||
|
License. No use of any Covered Software is authorized under this License
|
||||||
|
except under this disclaimer.
|
||||||
|
|
||||||
|
7. Limitation of Liability
|
||||||
|
|
||||||
|
Under no circumstances and under no legal theory, whether tort (including
|
||||||
|
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||||
|
distributes Covered Software as permitted above, be liable to You for any
|
||||||
|
direct, indirect, special, incidental, or consequential damages of any
|
||||||
|
character including, without limitation, damages for lost profits, loss of
|
||||||
|
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses, even if such party shall have been
|
||||||
|
informed of the possibility of such damages. This limitation of liability
|
||||||
|
shall not apply to liability for death or personal injury resulting from such
|
||||||
|
party’s negligence to the extent applicable law prohibits such limitation.
|
||||||
|
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||||
|
consequential damages, so this exclusion and limitation may not apply to You.
|
||||||
|
|
||||||
|
8. Litigation
|
||||||
|
|
||||||
|
Any litigation relating to this License may be brought only in the courts of
|
||||||
|
a jurisdiction where the defendant maintains its principal place of business
|
||||||
|
and such litigation shall be governed by laws of that jurisdiction, without
|
||||||
|
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||||
|
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject matter
|
||||||
|
hereof. If any provision of this License is held to be unenforceable, such
|
||||||
|
provision shall be reformed only to the extent necessary to make it
|
||||||
|
enforceable. Any law or regulation which provides that the language of a
|
||||||
|
contract shall be construed against the drafter shall not be used to construe
|
||||||
|
this License against a Contributor.
|
||||||
|
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version of
|
||||||
|
the License under which You originally received the Covered Software, or
|
||||||
|
under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a modified
|
||||||
|
version of this License if you rename the license and remove any
|
||||||
|
references to the name of the license steward (except to note that such
|
||||||
|
modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||||
|
If You choose to distribute Source Code Form that is Incompatible With
|
||||||
|
Secondary Licenses under the terms of this version of the License, the
|
||||||
|
notice described in Exhibit B of this License must be attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
|
||||||
|
This Source Code Form is subject to the
|
||||||
|
terms of the Mozilla Public License, v.
|
||||||
|
2.0. If a copy of the MPL was not
|
||||||
|
distributed with this file, You can
|
||||||
|
obtain one at
|
||||||
|
http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular file, then
|
||||||
|
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||||
|
directory) where a recipient would be likely to look for such a notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||||
|
|
||||||
|
This Source Code Form is “Incompatible
|
||||||
|
With Secondary Licenses”, as defined by
|
||||||
|
the Mozilla Public License, v. 2.0.
|
||||||
|
|
729
vendor/github.com/hashicorp/hcl/decoder.go
generated
vendored
Normal file
729
vendor/github.com/hashicorp/hcl/decoder.go
generated
vendored
Normal file
@ -0,0 +1,729 @@
|
|||||||
|
package hcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/hcl/ast"
|
||||||
|
"github.com/hashicorp/hcl/hcl/parser"
|
||||||
|
"github.com/hashicorp/hcl/hcl/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is the tag to use with structures to have settings for HCL
|
||||||
|
const tagName = "hcl"
|
||||||
|
|
||||||
|
var (
|
||||||
|
// nodeType holds a reference to the type of ast.Node
|
||||||
|
nodeType reflect.Type = findNodeType()
|
||||||
|
)
|
||||||
|
|
||||||
|
// Unmarshal accepts a byte slice as input and writes the
|
||||||
|
// data to the value pointed to by v.
|
||||||
|
func Unmarshal(bs []byte, v interface{}) error {
|
||||||
|
root, err := parse(bs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return DecodeObject(v, root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode reads the given input and decodes it into the structure
|
||||||
|
// given by `out`.
|
||||||
|
func Decode(out interface{}, in string) error {
|
||||||
|
obj, err := Parse(in)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return DecodeObject(out, obj)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeObject is a lower-level version of Decode. It decodes a
|
||||||
|
// raw Object into the given output.
|
||||||
|
func DecodeObject(out interface{}, n ast.Node) error {
|
||||||
|
val := reflect.ValueOf(out)
|
||||||
|
if val.Kind() != reflect.Ptr {
|
||||||
|
return errors.New("result must be a pointer")
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have the file, we really decode the root node
|
||||||
|
if f, ok := n.(*ast.File); ok {
|
||||||
|
n = f.Node
|
||||||
|
}
|
||||||
|
|
||||||
|
var d decoder
|
||||||
|
return d.decode("root", n, val.Elem())
|
||||||
|
}
|
||||||
|
|
||||||
|
type decoder struct {
|
||||||
|
stack []reflect.Kind
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
|
||||||
|
k := result
|
||||||
|
|
||||||
|
// If we have an interface with a valid value, we use that
|
||||||
|
// for the check.
|
||||||
|
if result.Kind() == reflect.Interface {
|
||||||
|
elem := result.Elem()
|
||||||
|
if elem.IsValid() {
|
||||||
|
k = elem
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push current onto stack unless it is an interface.
|
||||||
|
if k.Kind() != reflect.Interface {
|
||||||
|
d.stack = append(d.stack, k.Kind())
|
||||||
|
|
||||||
|
// Schedule a pop
|
||||||
|
defer func() {
|
||||||
|
d.stack = d.stack[:len(d.stack)-1]
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
switch k.Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
return d.decodeBool(name, node, result)
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return d.decodeFloat(name, node, result)
|
||||||
|
case reflect.Int, reflect.Int32, reflect.Int64:
|
||||||
|
return d.decodeInt(name, node, result)
|
||||||
|
case reflect.Interface:
|
||||||
|
// When we see an interface, we make our own thing
|
||||||
|
return d.decodeInterface(name, node, result)
|
||||||
|
case reflect.Map:
|
||||||
|
return d.decodeMap(name, node, result)
|
||||||
|
case reflect.Ptr:
|
||||||
|
return d.decodePtr(name, node, result)
|
||||||
|
case reflect.Slice:
|
||||||
|
return d.decodeSlice(name, node, result)
|
||||||
|
case reflect.String:
|
||||||
|
return d.decodeString(name, node, result)
|
||||||
|
case reflect.Struct:
|
||||||
|
return d.decodeStruct(name, node, result)
|
||||||
|
default:
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
|
||||||
|
switch n := node.(type) {
|
||||||
|
case *ast.LiteralType:
|
||||||
|
if n.Token.Type == token.BOOL {
|
||||||
|
v, err := strconv.ParseBool(n.Token.Text)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Set(reflect.ValueOf(v))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
|
||||||
|
switch n := node.(type) {
|
||||||
|
case *ast.LiteralType:
|
||||||
|
if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
|
||||||
|
v, err := strconv.ParseFloat(n.Token.Text, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Set(reflect.ValueOf(v).Convert(result.Type()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
|
||||||
|
switch n := node.(type) {
|
||||||
|
case *ast.LiteralType:
|
||||||
|
switch n.Token.Type {
|
||||||
|
case token.NUMBER:
|
||||||
|
v, err := strconv.ParseInt(n.Token.Text, 0, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if result.Kind() == reflect.Interface {
|
||||||
|
result.Set(reflect.ValueOf(int(v)))
|
||||||
|
} else {
|
||||||
|
result.SetInt(v)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
case token.STRING:
|
||||||
|
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if result.Kind() == reflect.Interface {
|
||||||
|
result.Set(reflect.ValueOf(int(v)))
|
||||||
|
} else {
|
||||||
|
result.SetInt(v)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
|
||||||
|
// When we see an ast.Node, we retain the value to enable deferred decoding.
|
||||||
|
// Very useful in situations where we want to preserve ast.Node information
|
||||||
|
// like Pos
|
||||||
|
if result.Type() == nodeType && result.CanSet() {
|
||||||
|
result.Set(reflect.ValueOf(node))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var set reflect.Value
|
||||||
|
redecode := true
|
||||||
|
|
||||||
|
// For testing types, ObjectType should just be treated as a list. We
|
||||||
|
// set this to a temporary var because we want to pass in the real node.
|
||||||
|
testNode := node
|
||||||
|
if ot, ok := node.(*ast.ObjectType); ok {
|
||||||
|
testNode = ot.List
|
||||||
|
}
|
||||||
|
|
||||||
|
switch n := testNode.(type) {
|
||||||
|
case *ast.ObjectList:
|
||||||
|
// If we're at the root or we're directly within a slice, then we
|
||||||
|
// decode objects into map[string]interface{}, otherwise we decode
|
||||||
|
// them into lists.
|
||||||
|
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
|
||||||
|
var temp map[string]interface{}
|
||||||
|
tempVal := reflect.ValueOf(temp)
|
||||||
|
result := reflect.MakeMap(
|
||||||
|
reflect.MapOf(
|
||||||
|
reflect.TypeOf(""),
|
||||||
|
tempVal.Type().Elem()))
|
||||||
|
|
||||||
|
set = result
|
||||||
|
} else {
|
||||||
|
var temp []map[string]interface{}
|
||||||
|
tempVal := reflect.ValueOf(temp)
|
||||||
|
result := reflect.MakeSlice(
|
||||||
|
reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
|
||||||
|
set = result
|
||||||
|
}
|
||||||
|
case *ast.ObjectType:
|
||||||
|
// If we're at the root or we're directly within a slice, then we
|
||||||
|
// decode objects into map[string]interface{}, otherwise we decode
|
||||||
|
// them into lists.
|
||||||
|
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
|
||||||
|
var temp map[string]interface{}
|
||||||
|
tempVal := reflect.ValueOf(temp)
|
||||||
|
result := reflect.MakeMap(
|
||||||
|
reflect.MapOf(
|
||||||
|
reflect.TypeOf(""),
|
||||||
|
tempVal.Type().Elem()))
|
||||||
|
|
||||||
|
set = result
|
||||||
|
} else {
|
||||||
|
var temp []map[string]interface{}
|
||||||
|
tempVal := reflect.ValueOf(temp)
|
||||||
|
result := reflect.MakeSlice(
|
||||||
|
reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
|
||||||
|
set = result
|
||||||
|
}
|
||||||
|
case *ast.ListType:
|
||||||
|
var temp []interface{}
|
||||||
|
tempVal := reflect.ValueOf(temp)
|
||||||
|
result := reflect.MakeSlice(
|
||||||
|
reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
|
||||||
|
set = result
|
||||||
|
case *ast.LiteralType:
|
||||||
|
switch n.Token.Type {
|
||||||
|
case token.BOOL:
|
||||||
|
var result bool
|
||||||
|
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||||
|
case token.FLOAT:
|
||||||
|
var result float64
|
||||||
|
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||||
|
case token.NUMBER:
|
||||||
|
var result int
|
||||||
|
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||||
|
case token.STRING, token.HEREDOC:
|
||||||
|
set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
|
||||||
|
default:
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%s: cannot decode into interface: %T",
|
||||||
|
name, node)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the result to what its supposed to be, then reset
|
||||||
|
// result so we don't reflect into this method anymore.
|
||||||
|
result.Set(set)
|
||||||
|
|
||||||
|
if redecode {
|
||||||
|
// Revisit the node so that we can use the newly instantiated
|
||||||
|
// thing and populate it.
|
||||||
|
if err := d.decode(name, node, result); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
|
||||||
|
if item, ok := node.(*ast.ObjectItem); ok {
|
||||||
|
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ot, ok := node.(*ast.ObjectType); ok {
|
||||||
|
node = ot.List
|
||||||
|
}
|
||||||
|
|
||||||
|
n, ok := node.(*ast.ObjectList)
|
||||||
|
if !ok {
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have an interface, then we can address the interface,
|
||||||
|
// but not the slice itself, so get the element but set the interface
|
||||||
|
set := result
|
||||||
|
if result.Kind() == reflect.Interface {
|
||||||
|
result = result.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
resultType := result.Type()
|
||||||
|
resultElemType := resultType.Elem()
|
||||||
|
resultKeyType := resultType.Key()
|
||||||
|
if resultKeyType.Kind() != reflect.String {
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: map must have string keys", name),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a map if it is nil
|
||||||
|
resultMap := result
|
||||||
|
if result.IsNil() {
|
||||||
|
resultMap = reflect.MakeMap(
|
||||||
|
reflect.MapOf(resultKeyType, resultElemType))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Go through each element and decode it.
|
||||||
|
done := make(map[string]struct{})
|
||||||
|
for _, item := range n.Items {
|
||||||
|
if item.Val == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// github.com/hashicorp/terraform/issue/5740
|
||||||
|
if len(item.Keys) == 0 {
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: map must have string keys", name),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the key we're dealing with, which is the first item
|
||||||
|
keyStr := item.Keys[0].Token.Value().(string)
|
||||||
|
|
||||||
|
// If we've already processed this key, then ignore it
|
||||||
|
if _, ok := done[keyStr]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the value. If we have more than one key, then we
|
||||||
|
// get the objectlist of only these keys.
|
||||||
|
itemVal := item.Val
|
||||||
|
if len(item.Keys) > 1 {
|
||||||
|
itemVal = n.Filter(keyStr)
|
||||||
|
done[keyStr] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make the field name
|
||||||
|
fieldName := fmt.Sprintf("%s.%s", name, keyStr)
|
||||||
|
|
||||||
|
// Get the key/value as reflection values
|
||||||
|
key := reflect.ValueOf(keyStr)
|
||||||
|
val := reflect.Indirect(reflect.New(resultElemType))
|
||||||
|
|
||||||
|
// If we have a pre-existing value in the map, use that
|
||||||
|
oldVal := resultMap.MapIndex(key)
|
||||||
|
if oldVal.IsValid() {
|
||||||
|
val.Set(oldVal)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode!
|
||||||
|
if err := d.decode(fieldName, itemVal, val); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the value on the map
|
||||||
|
resultMap.SetMapIndex(key, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the final map if we can
|
||||||
|
set.Set(resultMap)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
|
||||||
|
// Create an element of the concrete (non pointer) type and decode
|
||||||
|
// into that. Then set the value of the pointer to this type.
|
||||||
|
resultType := result.Type()
|
||||||
|
resultElemType := resultType.Elem()
|
||||||
|
val := reflect.New(resultElemType)
|
||||||
|
if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Set(val)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeSlice decodes node into the slice result. The node may be an
// *ast.ObjectList (each item becomes an element), a single
// *ast.ObjectType (a one-element slice), or an *ast.ListType; any other
// node type is a position error.
func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
	// If we have an interface, then we can address the interface,
	// but not the slice itself, so get the element but set the interface
	set := result
	if result.Kind() == reflect.Interface {
		result = result.Elem()
	}
	// Create the slice if it isn't nil
	resultType := result.Type()
	resultElemType := resultType.Elem()
	if result.IsNil() {
		resultSliceType := reflect.SliceOf(resultElemType)
		result = reflect.MakeSlice(
			resultSliceType, 0, 0)
	}

	// Figure out the items we'll be copying into the slice
	var items []ast.Node
	switch n := node.(type) {
	case *ast.ObjectList:
		// Widen []*ast.ObjectItem to []ast.Node element by element.
		items = make([]ast.Node, len(n.Items))
		for i, item := range n.Items {
			items[i] = item
		}
	case *ast.ObjectType:
		items = []ast.Node{n}
	case *ast.ListType:
		items = n.List
	default:
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("unknown slice type: %T", node),
		}
	}

	for i, item := range items {
		// Element names look like "parent[0]" for error reporting.
		fieldName := fmt.Sprintf("%s[%d]", name, i)

		// Decode
		val := reflect.Indirect(reflect.New(resultElemType))

		// if item is an object that was decoded from ambiguous JSON and
		// flattened, make sure it's expanded if it needs to decode into a
		// defined structure.
		item := expandObject(item, val)

		if err := d.decode(fieldName, item, val); err != nil {
			return err
		}

		// Append it onto the slice
		result = reflect.Append(result, val)
	}

	// Write back through `set` so an interface target receives the
	// (possibly reallocated) slice value.
	set.Set(result)
	return nil
}
|
||||||
|
|
||||||
|
// expandObject detects if an ambiguous JSON object was flattened to a List which
// should be decoded into a struct, and expands the ast to properly decode.
//
// Only *ast.ObjectItem nodes whose decode target is a struct (or pointer
// to struct) and which carry exactly two keys are rewritten; every other
// node is returned unchanged. NOTE(review): the item's Keys slice is
// mutated in place before being wrapped in the new node.
func expandObject(node ast.Node, result reflect.Value) ast.Node {
	item, ok := node.(*ast.ObjectItem)
	if !ok {
		return node
	}

	elemType := result.Type()

	// our target type must be a struct
	switch elemType.Kind() {
	case reflect.Ptr:
		switch elemType.Elem().Kind() {
		case reflect.Struct:
			//OK
		default:
			return node
		}
	case reflect.Struct:
		//OK
	default:
		return node
	}

	// A list value will have a key and field name. If it had more fields,
	// it wouldn't have been flattened.
	if len(item.Keys) != 2 {
		return node
	}

	// The first key becomes the outer object's key; the remaining key
	// stays on the inner item.
	keyToken := item.Keys[0].Token
	item.Keys = item.Keys[1:]

	// we need to un-flatten the ast enough to decode
	newNode := &ast.ObjectItem{
		Keys: []*ast.ObjectKey{
			&ast.ObjectKey{
				Token: keyToken,
			},
		},
		Val: &ast.ObjectType{
			List: &ast.ObjectList{
				Items: []*ast.ObjectItem{item},
			},
		},
	}

	return newNode
}
|
||||||
|
|
||||||
|
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
|
||||||
|
switch n := node.(type) {
|
||||||
|
case *ast.LiteralType:
|
||||||
|
switch n.Token.Type {
|
||||||
|
case token.NUMBER:
|
||||||
|
result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
|
||||||
|
return nil
|
||||||
|
case token.STRING, token.HEREDOC:
|
||||||
|
result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: unknown type for string %T", name, node),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
|
||||||
|
var item *ast.ObjectItem
|
||||||
|
if it, ok := node.(*ast.ObjectItem); ok {
|
||||||
|
item = it
|
||||||
|
node = it.Val
|
||||||
|
}
|
||||||
|
|
||||||
|
if ot, ok := node.(*ast.ObjectType); ok {
|
||||||
|
node = ot.List
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle the special case where the object itself is a literal. Previously
|
||||||
|
// the yacc parser would always ensure top-level elements were arrays. The new
|
||||||
|
// parser does not make the same guarantees, thus we need to convert any
|
||||||
|
// top-level literal elements into a list.
|
||||||
|
if _, ok := node.(*ast.LiteralType); ok && item != nil {
|
||||||
|
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
|
||||||
|
}
|
||||||
|
|
||||||
|
list, ok := node.(*ast.ObjectList)
|
||||||
|
if !ok {
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This slice will keep track of all the structs we'll be decoding.
|
||||||
|
// There can be more than one struct if there are embedded structs
|
||||||
|
// that are squashed.
|
||||||
|
structs := make([]reflect.Value, 1, 5)
|
||||||
|
structs[0] = result
|
||||||
|
|
||||||
|
// Compile the list of all the fields that we're going to be decoding
|
||||||
|
// from all the structs.
|
||||||
|
type field struct {
|
||||||
|
field reflect.StructField
|
||||||
|
val reflect.Value
|
||||||
|
}
|
||||||
|
fields := []field{}
|
||||||
|
for len(structs) > 0 {
|
||||||
|
structVal := structs[0]
|
||||||
|
structs = structs[1:]
|
||||||
|
|
||||||
|
structType := structVal.Type()
|
||||||
|
for i := 0; i < structType.NumField(); i++ {
|
||||||
|
fieldType := structType.Field(i)
|
||||||
|
tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
|
||||||
|
|
||||||
|
// Ignore fields with tag name "-"
|
||||||
|
if tagParts[0] == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if fieldType.Anonymous {
|
||||||
|
fieldKind := fieldType.Type.Kind()
|
||||||
|
if fieldKind != reflect.Struct {
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: unsupported type to struct: %s",
|
||||||
|
fieldType.Name, fieldKind),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We have an embedded field. We "squash" the fields down
|
||||||
|
// if specified in the tag.
|
||||||
|
squash := false
|
||||||
|
for _, tag := range tagParts[1:] {
|
||||||
|
if tag == "squash" {
|
||||||
|
squash = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if squash {
|
||||||
|
structs = append(
|
||||||
|
structs, result.FieldByName(fieldType.Name))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normal struct field, store it away
|
||||||
|
fields = append(fields, field{fieldType, structVal.Field(i)})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
usedKeys := make(map[string]struct{})
|
||||||
|
decodedFields := make([]string, 0, len(fields))
|
||||||
|
decodedFieldsVal := make([]reflect.Value, 0)
|
||||||
|
unusedKeysVal := make([]reflect.Value, 0)
|
||||||
|
for _, f := range fields {
|
||||||
|
field, fieldValue := f.field, f.val
|
||||||
|
if !fieldValue.IsValid() {
|
||||||
|
// This should never happen
|
||||||
|
panic("field is not valid")
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we can't set the field, then it is unexported or something,
|
||||||
|
// and we just continue onwards.
|
||||||
|
if !fieldValue.CanSet() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldName := field.Name
|
||||||
|
|
||||||
|
tagValue := field.Tag.Get(tagName)
|
||||||
|
tagParts := strings.SplitN(tagValue, ",", 2)
|
||||||
|
if len(tagParts) >= 2 {
|
||||||
|
switch tagParts[1] {
|
||||||
|
case "decodedFields":
|
||||||
|
decodedFieldsVal = append(decodedFieldsVal, fieldValue)
|
||||||
|
continue
|
||||||
|
case "key":
|
||||||
|
if item == nil {
|
||||||
|
return &parser.PosError{
|
||||||
|
Pos: node.Pos(),
|
||||||
|
Err: fmt.Errorf("%s: %s asked for 'key', impossible",
|
||||||
|
name, fieldName),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldValue.SetString(item.Keys[0].Token.Value().(string))
|
||||||
|
continue
|
||||||
|
case "unusedKeys":
|
||||||
|
unusedKeysVal = append(unusedKeysVal, fieldValue)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tagParts[0] != "" {
|
||||||
|
fieldName = tagParts[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the element we'll use to decode. If it is a single
|
||||||
|
// match (only object with the field), then we decode it exactly.
|
||||||
|
// If it is a prefix match, then we decode the matches.
|
||||||
|
filter := list.Filter(fieldName)
|
||||||
|
|
||||||
|
prefixMatches := filter.Children()
|
||||||
|
matches := filter.Elem()
|
||||||
|
if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Track the used key
|
||||||
|
usedKeys[fieldName] = struct{}{}
|
||||||
|
|
||||||
|
// Create the field name and decode. We range over the elements
|
||||||
|
// because we actually want the value.
|
||||||
|
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
|
||||||
|
if len(prefixMatches.Items) > 0 {
|
||||||
|
if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, match := range matches.Items {
|
||||||
|
var decodeNode ast.Node = match.Val
|
||||||
|
if ot, ok := decodeNode.(*ast.ObjectType); ok {
|
||||||
|
decodeNode = &ast.ObjectList{Items: ot.List.Items}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
decodedFields = append(decodedFields, field.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(decodedFieldsVal) > 0 {
|
||||||
|
// Sort it so that it is deterministic
|
||||||
|
sort.Strings(decodedFields)
|
||||||
|
|
||||||
|
for _, v := range decodedFieldsVal {
|
||||||
|
v.Set(reflect.ValueOf(decodedFields))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// findNodeType returns the type of ast.Node
|
||||||
|
func findNodeType() reflect.Type {
|
||||||
|
var nodeContainer struct {
|
||||||
|
Node ast.Node
|
||||||
|
}
|
||||||
|
value := reflect.ValueOf(nodeContainer).FieldByName("Node")
|
||||||
|
return value.Type()
|
||||||
|
}
|
11
vendor/github.com/hashicorp/hcl/hcl.go
generated
vendored
Normal file
11
vendor/github.com/hashicorp/hcl/hcl.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
// Package hcl decodes HCL into usable Go structures.
|
||||||
|
//
|
||||||
|
// hcl input can come in either pure HCL format or JSON format.
|
||||||
|
// It can be parsed into an AST, and then decoded into a structure,
|
||||||
|
// or it can be decoded directly from a string into a structure.
|
||||||
|
//
|
||||||
|
// If you choose to parse HCL into a raw AST, the benefit is that you
|
||||||
|
// can write custom visitor implementations to implement custom
|
||||||
|
// semantic checks. By default, HCL does not perform any semantic
|
||||||
|
// checks.
|
||||||
|
package hcl
|
219
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
generated
vendored
Normal file
219
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
generated
vendored
Normal file
@ -0,0 +1,219 @@
|
|||||||
|
// Package ast declares the types used to represent syntax trees for HCL
|
||||||
|
// (HashiCorp Configuration Language)
|
||||||
|
package ast
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/hcl/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Node is an element in the abstract syntax tree.
|
||||||
|
type Node interface {
|
||||||
|
node()
|
||||||
|
Pos() token.Pos
|
||||||
|
}
|
||||||
|
|
||||||
|
func (File) node() {}
|
||||||
|
func (ObjectList) node() {}
|
||||||
|
func (ObjectKey) node() {}
|
||||||
|
func (ObjectItem) node() {}
|
||||||
|
func (Comment) node() {}
|
||||||
|
func (CommentGroup) node() {}
|
||||||
|
func (ObjectType) node() {}
|
||||||
|
func (LiteralType) node() {}
|
||||||
|
func (ListType) node() {}
|
||||||
|
|
||||||
|
// File represents a single HCL file
|
||||||
|
type File struct {
|
||||||
|
Node Node // usually a *ObjectList
|
||||||
|
Comments []*CommentGroup // list of all comments in the source
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) Pos() token.Pos {
|
||||||
|
return f.Node.Pos()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectList represents a list of ObjectItems. An HCL file itself is an
|
||||||
|
// ObjectList.
|
||||||
|
type ObjectList struct {
|
||||||
|
Items []*ObjectItem
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *ObjectList) Add(item *ObjectItem) {
|
||||||
|
o.Items = append(o.Items, item)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter filters out the objects with the given key list as a prefix.
|
||||||
|
//
|
||||||
|
// The returned list of objects contain ObjectItems where the keys have
|
||||||
|
// this prefix already stripped off. This might result in objects with
|
||||||
|
// zero-length key lists if they have no children.
|
||||||
|
//
|
||||||
|
// If no matches are found, an empty ObjectList (non-nil) is returned.
|
||||||
|
func (o *ObjectList) Filter(keys ...string) *ObjectList {
|
||||||
|
var result ObjectList
|
||||||
|
for _, item := range o.Items {
|
||||||
|
// If there aren't enough keys, then ignore this
|
||||||
|
if len(item.Keys) < len(keys) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
match := true
|
||||||
|
for i, key := range item.Keys[:len(keys)] {
|
||||||
|
key := key.Token.Value().(string)
|
||||||
|
if key != keys[i] && !strings.EqualFold(key, keys[i]) {
|
||||||
|
match = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !match {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strip off the prefix from the children
|
||||||
|
newItem := *item
|
||||||
|
newItem.Keys = newItem.Keys[len(keys):]
|
||||||
|
result.Add(&newItem)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Children returns further nested objects (key length > 0) within this
|
||||||
|
// ObjectList. This should be used with Filter to get at child items.
|
||||||
|
func (o *ObjectList) Children() *ObjectList {
|
||||||
|
var result ObjectList
|
||||||
|
for _, item := range o.Items {
|
||||||
|
if len(item.Keys) > 0 {
|
||||||
|
result.Add(item)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Elem returns items in the list that are direct element assignments
|
||||||
|
// (key length == 0). This should be used with Filter to get at elements.
|
||||||
|
func (o *ObjectList) Elem() *ObjectList {
|
||||||
|
var result ObjectList
|
||||||
|
for _, item := range o.Items {
|
||||||
|
if len(item.Keys) == 0 {
|
||||||
|
result.Add(item)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *ObjectList) Pos() token.Pos {
|
||||||
|
// always returns the uninitiliazed position
|
||||||
|
return o.Items[0].Pos()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectItem represents a HCL Object Item. An item is represented with a key
|
||||||
|
// (or keys). It can be an assignment or an object (both normal and nested)
|
||||||
|
type ObjectItem struct {
|
||||||
|
// keys is only one length long if it's of type assignment. If it's a
|
||||||
|
// nested object it can be larger than one. In that case "assign" is
|
||||||
|
// invalid as there is no assignments for a nested object.
|
||||||
|
Keys []*ObjectKey
|
||||||
|
|
||||||
|
// assign contains the position of "=", if any
|
||||||
|
Assign token.Pos
|
||||||
|
|
||||||
|
// val is the item itself. It can be an object,list, number, bool or a
|
||||||
|
// string. If key length is larger than one, val can be only of type
|
||||||
|
// Object.
|
||||||
|
Val Node
|
||||||
|
|
||||||
|
LeadComment *CommentGroup // associated lead comment
|
||||||
|
LineComment *CommentGroup // associated line comment
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *ObjectItem) Pos() token.Pos {
|
||||||
|
// I'm not entirely sure what causes this, but removing this causes
|
||||||
|
// a test failure. We should investigate at some point.
|
||||||
|
if len(o.Keys) == 0 {
|
||||||
|
return token.Pos{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return o.Keys[0].Pos()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectKeys are either an identifier or of type string.
|
||||||
|
type ObjectKey struct {
|
||||||
|
Token token.Token
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *ObjectKey) Pos() token.Pos {
|
||||||
|
return o.Token.Pos
|
||||||
|
}
|
||||||
|
|
||||||
|
// LiteralType represents a literal of basic type. Valid types are:
|
||||||
|
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
|
||||||
|
type LiteralType struct {
|
||||||
|
Token token.Token
|
||||||
|
|
||||||
|
// comment types, only used when in a list
|
||||||
|
LeadComment *CommentGroup
|
||||||
|
LineComment *CommentGroup
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LiteralType) Pos() token.Pos {
|
||||||
|
return l.Token.Pos
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListStatement represents a HCL List type
|
||||||
|
type ListType struct {
|
||||||
|
Lbrack token.Pos // position of "["
|
||||||
|
Rbrack token.Pos // position of "]"
|
||||||
|
List []Node // the elements in lexical order
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *ListType) Pos() token.Pos {
|
||||||
|
return l.Lbrack
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *ListType) Add(node Node) {
|
||||||
|
l.List = append(l.List, node)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectType represents a HCL Object Type
|
||||||
|
type ObjectType struct {
|
||||||
|
Lbrace token.Pos // position of "{"
|
||||||
|
Rbrace token.Pos // position of "}"
|
||||||
|
List *ObjectList // the nodes in lexical order
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *ObjectType) Pos() token.Pos {
|
||||||
|
return o.Lbrace
|
||||||
|
}
|
||||||
|
|
||||||
|
// Comment node represents a single //, # style or /*- style comment.
type Comment struct {
	Start token.Pos // position of / or #
	Text  string    // the raw comment text, markers included
}
|
||||||
|
|
||||||
|
func (c *Comment) Pos() token.Pos {
|
||||||
|
return c.Start
|
||||||
|
}
|
||||||
|
|
||||||
|
// CommentGroup node represents a sequence of comments with no other tokens and
|
||||||
|
// no empty lines between.
|
||||||
|
type CommentGroup struct {
|
||||||
|
List []*Comment // len(List) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CommentGroup) Pos() token.Pos {
|
||||||
|
return c.List[0].Pos()
|
||||||
|
}
|
||||||
|
|
||||||
|
//-------------------------------------------------------------------
|
||||||
|
// GoStringer
|
||||||
|
//-------------------------------------------------------------------
|
||||||
|
|
||||||
|
func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
|
||||||
|
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
|
52
vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
generated
vendored
Normal file
52
vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
package ast
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// WalkFunc describes a function to be called for each node during a Walk. The
// returned node can be used to rewrite the AST. Walking stops when the
// returned bool is false.
type WalkFunc func(Node) (Node, bool)

// Walk traverses an AST in depth-first order: It starts by calling fn(node);
// node must not be nil. If fn returns true, Walk invokes fn recursively for
// each of the non-nil children of node, followed by a call of fn(nil). The
// returned node of fn can be used to rewrite the passed node to fn.
//
// Children are rewritten in place: the slices/fields of the visited node
// are mutated with whatever the recursive Walk calls return.
func Walk(node Node, fn WalkFunc) Node {
	rewritten, ok := fn(node)
	if !ok {
		// fn declined to descend; hand back its (possibly rewritten) node.
		return rewritten
	}

	switch n := node.(type) {
	case *File:
		n.Node = Walk(n.Node, fn)
	case *ObjectList:
		for i, item := range n.Items {
			n.Items[i] = Walk(item, fn).(*ObjectItem)
		}
	case *ObjectKey:
		// nothing to do
	case *ObjectItem:
		for i, k := range n.Keys {
			n.Keys[i] = Walk(k, fn).(*ObjectKey)
		}

		if n.Val != nil {
			n.Val = Walk(n.Val, fn)
		}
	case *LiteralType:
		// nothing to do
	case *ListType:
		for i, l := range n.List {
			n.List[i] = Walk(l, fn)
		}
	case *ObjectType:
		n.List = Walk(n.List, fn).(*ObjectList)
	default:
		// should we panic here?
		fmt.Printf("unknown type: %T\n", n)
	}

	// Signal "end of this node's children" to fn, per the contract above.
	fn(nil)
	return rewritten
}
|
162
vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
generated
vendored
Normal file
162
vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
generated
vendored
Normal file
@ -0,0 +1,162 @@
|
|||||||
|
// Derivative work from:
|
||||||
|
// - https://golang.org/src/cmd/gofmt/gofmt.go
|
||||||
|
// - https://github.com/fatih/hclfmt
|
||||||
|
|
||||||
|
package fmtcmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/hcl/printer"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrWriteStdin = errors.New("cannot use write option with standard input")
|
||||||
|
)
|
||||||
|
|
||||||
|
type Options struct {
|
||||||
|
List bool // list files whose formatting differs
|
||||||
|
Write bool // write result to (source) file instead of stdout
|
||||||
|
Diff bool // display diffs of formatting changes
|
||||||
|
}
|
||||||
|
|
||||||
|
func isValidFile(f os.FileInfo, extensions []string) bool {
|
||||||
|
if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") {
|
||||||
|
for _, ext := range extensions {
|
||||||
|
if strings.HasSuffix(f.Name(), "."+ext) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// If in == nil, the source is the contents of the file with the given filename.
//
// processFile reads HCL source from in (or the named file), reformats it
// with the HCL printer, and then acts per opts: list the filename, write
// the result back to the file, print a unified diff, or — when no option
// is set — write the formatted output to out.
// NOTE(review): the stdin flag is accepted but unused in this body;
// behavior does not depend on it.
func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error {
	if in == nil {
		f, err := os.Open(filename)
		if err != nil {
			return err
		}
		defer f.Close()
		in = f
	}

	src, err := ioutil.ReadAll(in)
	if err != nil {
		return err
	}

	// Reformat the source; a parse failure is reported with the filename.
	res, err := printer.Format(src)
	if err != nil {
		return fmt.Errorf("In %s: %s", filename, err)
	}

	if !bytes.Equal(src, res) {
		// formatting has changed
		if opts.List {
			fmt.Fprintln(out, filename)
		}
		if opts.Write {
			err = ioutil.WriteFile(filename, res, 0644)
			if err != nil {
				return err
			}
		}
		if opts.Diff {
			data, err := diff(src, res)
			if err != nil {
				return fmt.Errorf("computing diff: %s", err)
			}
			// Emit a gofmt-style "diff a/f b/f" header before the hunk.
			fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename)
			out.Write(data)
		}
	}

	// Default mode: no -l/-w/-d flags, so stream the result to out.
	if !opts.List && !opts.Write && !opts.Diff {
		_, err = out.Write(res)
	}

	return err
}
|
||||||
|
|
||||||
|
func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error {
|
||||||
|
visitFile := func(path string, f os.FileInfo, err error) error {
|
||||||
|
if err == nil && isValidFile(f, extensions) {
|
||||||
|
err = processFile(path, nil, stdout, false, opts)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return filepath.Walk(path, visitFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run formats the given paths with the HCL printer. With no paths,
// stdin is formatted to stdout (the write option is rejected in that
// mode, since there is no file to rewrite). Directory paths are walked
// recursively, selecting files by the extensions list; file paths are
// processed directly. The first error encountered stops the run.
func Run(
	paths, extensions []string,
	stdin io.Reader,
	stdout io.Writer,
	opts Options,
) error {
	if len(paths) == 0 {
		if opts.Write {
			return ErrWriteStdin
		}
		if err := processFile("<standard input>", stdin, stdout, true, opts); err != nil {
			return err
		}
		return nil
	}

	for _, path := range paths {
		// Stat once and dispatch on error / directory / plain file.
		switch dir, err := os.Stat(path); {
		case err != nil:
			return err
		case dir.IsDir():
			if err := walkDir(path, extensions, stdout, opts); err != nil {
				return err
			}
		default:
			if err := processFile(path, nil, stdout, false, opts); err != nil {
				return err
			}
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// diff writes b1 and b2 to temporary files and runs the system
// "diff -u" tool over them, returning its combined output. diff's
// non-zero exit status for differing inputs is suppressed as long as
// output was produced.
func diff(b1, b2 []byte) (data []byte, err error) {
	f1, err := ioutil.TempFile("", "")
	if err != nil {
		return
	}
	defer os.Remove(f1.Name())
	defer f1.Close()

	f2, err := ioutil.TempFile("", "")
	if err != nil {
		return
	}
	defer os.Remove(f2.Name())
	defer f2.Close()

	// Check the write errors: a short or failed write (e.g. disk full)
	// previously went unnoticed and would silently produce a bogus diff.
	if _, err = f1.Write(b1); err != nil {
		return
	}
	if _, err = f2.Write(b2); err != nil {
		return
	}

	data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
	if len(data) > 0 {
		// diff exits with a non-zero status when the files don't match.
		// Ignore that failure as long as we get output.
		err = nil
	}
	return
}
|
17
vendor/github.com/hashicorp/hcl/hcl/parser/error.go
generated
vendored
Normal file
17
vendor/github.com/hashicorp/hcl/hcl/parser/error.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/hcl/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PosError is a parse error that contains a position.
|
||||||
|
type PosError struct {
|
||||||
|
Pos token.Pos
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *PosError) Error() string {
|
||||||
|
return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
|
||||||
|
}
|
526
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
generated
vendored
Normal file
526
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
generated
vendored
Normal file
@ -0,0 +1,526 @@
|
|||||||
|
// Package parser implements a parser for HCL (HashiCorp Configuration
|
||||||
|
// Language)
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/hcl/ast"
|
||||||
|
"github.com/hashicorp/hcl/hcl/scanner"
|
||||||
|
"github.com/hashicorp/hcl/hcl/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Parser struct {
|
||||||
|
sc *scanner.Scanner
|
||||||
|
|
||||||
|
// Last read token
|
||||||
|
tok token.Token
|
||||||
|
commaPrev token.Token
|
||||||
|
|
||||||
|
comments []*ast.CommentGroup
|
||||||
|
leadComment *ast.CommentGroup // last lead comment
|
||||||
|
lineComment *ast.CommentGroup // last line comment
|
||||||
|
|
||||||
|
enableTrace bool
|
||||||
|
indent int
|
||||||
|
n int // buffer size (max = 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newParser(src []byte) *Parser {
|
||||||
|
return &Parser{
|
||||||
|
sc: scanner.New(src),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse returns the fully parsed source and returns the abstract syntax tree.
|
||||||
|
func Parse(src []byte) (*ast.File, error) {
|
||||||
|
// normalize all line endings
|
||||||
|
// since the scanner and output only work with "\n" line endings, we may
|
||||||
|
// end up with dangling "\r" characters in the parsed data.
|
||||||
|
src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
|
||||||
|
|
||||||
|
p := newParser(src)
|
||||||
|
return p.Parse()
|
||||||
|
}
|
||||||
|
|
||||||
|
var errEofToken = errors.New("EOF token found")
|
||||||
|
|
||||||
|
// Parse returns the fully parsed source and returns the abstract syntax tree.
|
||||||
|
func (p *Parser) Parse() (*ast.File, error) {
|
||||||
|
f := &ast.File{}
|
||||||
|
var err, scerr error
|
||||||
|
p.sc.Error = func(pos token.Pos, msg string) {
|
||||||
|
scerr = &PosError{Pos: pos, Err: errors.New(msg)}
|
||||||
|
}
|
||||||
|
|
||||||
|
f.Node, err = p.objectList(false)
|
||||||
|
if scerr != nil {
|
||||||
|
return nil, scerr
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
f.Comments = p.comments
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// objectList parses a list of items within an object (generally k/v pairs).
// The parameter obj tells this function whether we are within an object
// (braces: '{', '}') or just at the top level. If we're within an object,
// we end at an RBRACE.
func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
	defer un(trace(p, "ParseObjectList"))
	node := &ast.ObjectList{}

	for {
		if obj {
			// Peek at the next token: a closing brace terminates the
			// list when we are inside an object.
			tok := p.scan()
			p.unscan()
			if tok.Type == token.RBRACE {
				break
			}
		}

		n, err := p.objectItem()
		if err == errEofToken {
			break // we are finished
		}

		// we don't return a nil node, because might want to use already
		// collected items.
		if err != nil {
			return node, err
		}

		node.Add(n)

		// object lists can be optionally comma-delimited e.g. when a list of maps
		// is being expressed, so a comma is allowed here - it's simply consumed
		tok := p.scan()
		if tok.Type != token.COMMA {
			p.unscan()
		}
	}
	return node, nil
}
|
||||||
|
|
||||||
|
func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
|
||||||
|
endline = p.tok.Pos.Line
|
||||||
|
|
||||||
|
// count the endline if it's multiline comment, ie starting with /*
|
||||||
|
if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
|
||||||
|
// don't use range here - no need to decode Unicode code points
|
||||||
|
for i := 0; i < len(p.tok.Text); i++ {
|
||||||
|
if p.tok.Text[i] == '\n' {
|
||||||
|
endline++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
|
||||||
|
p.tok = p.sc.Scan()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
|
||||||
|
var list []*ast.Comment
|
||||||
|
endline = p.tok.Pos.Line
|
||||||
|
|
||||||
|
for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
|
||||||
|
var comment *ast.Comment
|
||||||
|
comment, endline = p.consumeComment()
|
||||||
|
list = append(list, comment)
|
||||||
|
}
|
||||||
|
|
||||||
|
// add comment group to the comments list
|
||||||
|
comments = &ast.CommentGroup{List: list}
|
||||||
|
p.comments = append(p.comments, comments)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// objectItem parses a single object item: one or more keys followed by a
// value introduced either by '=' (assignment) or '{' (nested object).
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
	defer un(trace(p, "ParseObjectItem"))

	keys, err := p.objectKey()
	if len(keys) > 0 && err == errEofToken {
		// We ignore eof token here since it is an error if we didn't
		// receive a value (but we did receive a key) for the item.
		err = nil
	}
	if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
		// This is a strange boolean statement, but what it means is:
		// We have keys with no value, and we're likely in an object
		// (since RBrace ends an object). For this, we set err to nil so
		// we continue and get the error below of having the wrong value
		// type.
		err = nil

		// Reset the token type so we don't think it completed fine. See
		// objectType which uses p.tok.Type to check if we're done with
		// the object.
		p.tok.Type = token.EOF
	}
	if err != nil {
		return nil, err
	}

	o := &ast.ObjectItem{
		Keys: keys,
	}

	// Attach and clear any pending lead comment collected by scan().
	if p.leadComment != nil {
		o.LeadComment = p.leadComment
		p.leadComment = nil
	}

	switch p.tok.Type {
	case token.ASSIGN:
		o.Assign = p.tok.Pos
		o.Val, err = p.object()
		if err != nil {
			return nil, err
		}
	case token.LBRACE:
		o.Val, err = p.objectType()
		if err != nil {
			return nil, err
		}
	default:
		// Neither '=' nor '{' followed the key(s): report them joined
		// into a single string for the error message.
		keyStr := make([]string, 0, len(keys))
		for _, k := range keys {
			keyStr = append(keyStr, k.Token.Text)
		}

		return nil, &PosError{
			Pos: p.tok.Pos,
			Err: fmt.Errorf(
				"key '%s' expected start of object ('{') or assignment ('=')",
				strings.Join(keyStr, " ")),
		}
	}

	// do a look-ahead for line comment
	p.scan()
	if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
		o.LineComment = p.lineComment
		p.lineComment = nil
	}
	p.unscan()
	return o, nil
}
|
||||||
|
|
||||||
|
// objectKey parses an object key and returns a ObjectKey AST
|
||||||
|
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
|
||||||
|
keyCount := 0
|
||||||
|
keys := make([]*ast.ObjectKey, 0)
|
||||||
|
|
||||||
|
for {
|
||||||
|
tok := p.scan()
|
||||||
|
switch tok.Type {
|
||||||
|
case token.EOF:
|
||||||
|
// It is very important to also return the keys here as well as
|
||||||
|
// the error. This is because we need to be able to tell if we
|
||||||
|
// did parse keys prior to finding the EOF, or if we just found
|
||||||
|
// a bare EOF.
|
||||||
|
return keys, errEofToken
|
||||||
|
case token.ASSIGN:
|
||||||
|
// assignment or object only, but not nested objects. this is not
|
||||||
|
// allowed: `foo bar = {}`
|
||||||
|
if keyCount > 1 {
|
||||||
|
return nil, &PosError{
|
||||||
|
Pos: p.tok.Pos,
|
||||||
|
Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if keyCount == 0 {
|
||||||
|
return nil, &PosError{
|
||||||
|
Pos: p.tok.Pos,
|
||||||
|
Err: errors.New("no object keys found!"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return keys, nil
|
||||||
|
case token.LBRACE:
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// If we have no keys, then it is a syntax error. i.e. {{}} is not
|
||||||
|
// allowed.
|
||||||
|
if len(keys) == 0 {
|
||||||
|
err = &PosError{
|
||||||
|
Pos: p.tok.Pos,
|
||||||
|
Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// object
|
||||||
|
return keys, err
|
||||||
|
case token.IDENT, token.STRING:
|
||||||
|
keyCount++
|
||||||
|
keys = append(keys, &ast.ObjectKey{Token: p.tok})
|
||||||
|
case token.ILLEGAL:
|
||||||
|
return keys, &PosError{
|
||||||
|
Pos: p.tok.Pos,
|
||||||
|
Err: fmt.Errorf("illegal character"),
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return keys, &PosError{
|
||||||
|
Pos: p.tok.Pos,
|
||||||
|
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// object parses any type of object, such as number, bool, string, object or
// list.
func (p *Parser) object() (ast.Node, error) {
	defer un(trace(p, "ParseType"))
	tok := p.scan()

	switch tok.Type {
	case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
		return p.literalType()
	case token.LBRACE:
		return p.objectType()
	case token.LBRACK:
		return p.listType()
	case token.COMMENT:
		// implement comment
		// NOTE: unhandled; falls through to the "Unknown token" error below.
	case token.EOF:
		return nil, errEofToken
	}

	// Any token not handled above is a syntax error at this position.
	return nil, &PosError{
		Pos: tok.Pos,
		Err: fmt.Errorf("Unknown token: %+v", tok),
	}
}
|
||||||
|
|
||||||
|
// objectType parses an object type and returns a ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseObjectType"))

	// we assume that the currently scanned token is a LBRACE
	o := &ast.ObjectType{
		Lbrace: p.tok.Pos,
	}

	l, err := p.objectList(true)

	// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
	// not a RBRACE, it's an syntax error and we just return it.
	if err != nil && p.tok.Type != token.RBRACE {
		return nil, err
	}

	// No error, scan and expect the ending to be a brace
	if tok := p.scan(); tok.Type != token.RBRACE {
		return nil, &PosError{
			Pos: tok.Pos,
			Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
		}
	}

	o.List = l
	o.Rbrace = p.tok.Pos // advanced via parseObjectList
	return o, nil
}
|
||||||
|
|
||||||
|
// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
	defer un(trace(p, "ParseListType"))

	// we assume that the currently scanned token is a LBRACK
	l := &ast.ListType{
		Lbrack: p.tok.Pos,
	}

	// needComma tracks whether the previous iteration consumed a value, in
	// which case only a comma or the closing bracket may follow.
	needComma := false
	for {
		tok := p.scan()
		if needComma {
			switch tok.Type {
			case token.COMMA, token.RBRACK:
			default:
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error parsing list, expected comma or list end, got: %s",
						tok.Type),
				}
			}
		}
		switch tok.Type {
		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
			node, err := p.literalType()
			if err != nil {
				return nil, err
			}

			// If there is a lead comment, apply it
			if p.leadComment != nil {
				node.LeadComment = p.leadComment
				p.leadComment = nil
			}

			l.Add(node)
			needComma = true
		case token.COMMA:
			// get next list item or we are at the end
			// do a look-ahead for line comment
			p.scan()
			if p.lineComment != nil && len(l.List) > 0 {
				lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
				if ok {
					lit.LineComment = p.lineComment
					l.List[len(l.List)-1] = lit
					p.lineComment = nil
				}
			}
			p.unscan()

			needComma = false
			continue
		case token.LBRACE:
			// Looks like a nested object, so parse it out
			node, err := p.objectType()
			if err != nil {
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error while trying to parse object within list: %s", err),
				}
			}
			l.Add(node)
			needComma = true
		case token.LBRACK:
			// Nested list.
			node, err := p.listType()
			if err != nil {
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error while trying to parse list within list: %s", err),
				}
			}
			l.Add(node)
		case token.RBRACK:
			// finished
			l.Rbrack = p.tok.Pos
			return l, nil
		default:
			return nil, &PosError{
				Pos: tok.Pos,
				Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
			}
		}
	}
}
|
||||||
|
|
||||||
|
// literalType parses a literal type and returns a LiteralType AST
|
||||||
|
func (p *Parser) literalType() (*ast.LiteralType, error) {
|
||||||
|
defer un(trace(p, "ParseLiteral"))
|
||||||
|
|
||||||
|
return &ast.LiteralType{
|
||||||
|
Token: p.tok,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead. In the process, it collects any
// comment groups encountered, and remembers the last lead and line comments.
func (p *Parser) scan() token.Token {
	// If we have a token on the buffer, then return it.
	if p.n != 0 {
		p.n = 0
		return p.tok
	}

	// Otherwise read the next token from the scanner and Save it to the buffer
	// in case we unscan later.
	prev := p.tok
	p.tok = p.sc.Scan()

	if p.tok.Type == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
		// p.tok.Pos.Line, prev.Pos.Line, endline)
		if p.tok.Pos.Line == prev.Pos.Line {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.tok.Pos.Line != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok.Type == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
			switch p.tok.Type {
			case token.RBRACE, token.RBRACK:
				// Do not count for these cases
			default:
				// The next token is following on the line immediately after the
				// comment group, thus the last comment group is a lead comment.
				p.leadComment = comment
			}
		}

	}

	return p.tok
}
|
||||||
|
|
||||||
|
// unscan pushes the previously read token back onto the buffer.
// The next call to scan() will return p.tok again instead of advancing.
func (p *Parser) unscan() {
	p.n = 1
}
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// Parsing support
|
||||||
|
|
||||||
|
// printTrace prints a trace line prefixed with the current token position
// and a dot pattern proportional to the nesting depth. It is a no-op unless
// tracing is enabled.
func (p *Parser) printTrace(a ...interface{}) {
	if !p.enableTrace {
		return
	}

	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)

	// Emit two dots-columns per indent level, wrapping over the fixed-width
	// dots string for deep nesting.
	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}
|
||||||
|
|
||||||
|
// trace prints an opening trace marker and increases the indent level.
// It returns p so it composes with un: defer un(trace(p, "...")).
func trace(p *Parser, msg string) *Parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
|
||||||
|
|
||||||
|
// un decreases the indent level and prints a closing trace marker.
// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
	p.indent--
	p.printTrace(")")
}
|
779
vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
generated
vendored
Normal file
779
vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
generated
vendored
Normal file
@ -0,0 +1,779 @@
|
|||||||
|
package printer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/hcl/ast"
|
||||||
|
"github.com/hashicorp/hcl/hcl/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Byte constants used while emitting formatted output.
const (
	blank    = byte(' ')
	newline  = byte('\n')
	tab      = byte('\t')
	infinity = 1 << 30 // offset or line
)

var (
	unindent = []byte("\uE123") // in the private use space
)

// printer carries the state needed to render an AST back to HCL text.
type printer struct {
	cfg  Config
	prev token.Pos // position of the previously printed node

	comments           []*ast.CommentGroup // may be nil, contains all comments
	standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)

	enableTrace bool
	indentTrace int
}

// ByPosition sorts comment groups by their source position.
type ByPosition []*ast.CommentGroup

func (b ByPosition) Len() int           { return len(b) }
func (b ByPosition) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
|
||||||
|
|
||||||
|
// collectComments comments all standalone comments which are not lead or line
// comment
func (p *printer) collectComments(node ast.Node) {
	// first collect all comments. This is already stored in
	// ast.File.(comments)
	ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
		switch t := nn.(type) {
		case *ast.File:
			p.comments = t.Comments
			return nn, false
		}
		return nn, true
	})

	// Start with every comment group treated as standalone, keyed by
	// position so attached ones can be removed below.
	standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
	for _, c := range p.comments {
		standaloneComments[c.Pos()] = c
	}

	// next remove all lead and line comments from the overall comment map.
	// This will give us comments which are standalone, comments which are not
	// assigned to any kind of node.
	ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
		switch t := nn.(type) {
		case *ast.LiteralType:
			if t.LeadComment != nil {
				for _, comment := range t.LeadComment.List {
					if _, ok := standaloneComments[comment.Pos()]; ok {
						delete(standaloneComments, comment.Pos())
					}
				}
			}

			if t.LineComment != nil {
				for _, comment := range t.LineComment.List {
					if _, ok := standaloneComments[comment.Pos()]; ok {
						delete(standaloneComments, comment.Pos())
					}
				}
			}
		case *ast.ObjectItem:
			if t.LeadComment != nil {
				for _, comment := range t.LeadComment.List {
					if _, ok := standaloneComments[comment.Pos()]; ok {
						delete(standaloneComments, comment.Pos())
					}
				}
			}

			if t.LineComment != nil {
				for _, comment := range t.LineComment.List {
					if _, ok := standaloneComments[comment.Pos()]; ok {
						delete(standaloneComments, comment.Pos())
					}
				}
			}
		}

		return nn, true
	})

	// Map iteration order is random; sort so output is deterministic.
	for _, c := range standaloneComments {
		p.standaloneComments = append(p.standaloneComments, c)
	}

	sort.Sort(ByPosition(p.standaloneComments))
}
|
||||||
|
|
||||||
|
// output prints creates b printable HCL output and returns it.
// It dispatches on the concrete node type and recurses for containers.
func (p *printer) output(n interface{}) []byte {
	var buf bytes.Buffer

	switch t := n.(type) {
	case *ast.File:
		// File doesn't trace so we add the tracing here
		defer un(trace(p, "File"))
		return p.output(t.Node)
	case *ast.ObjectList:
		defer un(trace(p, "ObjectList"))

		var index int
		for {
			// Determine the location of the next actual non-comment
			// item. If we're at the end, the next item is at "infinity"
			var nextItem token.Pos
			if index != len(t.Items) {
				nextItem = t.Items[index].Pos()
			} else {
				nextItem = token.Pos{Offset: infinity, Line: infinity}
			}

			// Go through the standalone comments in the file and print out
			// the comments that we should be for this object item.
			for _, c := range p.standaloneComments {
				// Go through all the comments in the group. The group
				// should be printed together, not separated by double newlines.
				printed := false
				newlinePrinted := false
				for _, comment := range c.List {
					// We only care about comments after the previous item
					// we've printed so that comments are printed in the
					// correct locations (between two objects for example).
					// And before the next item.
					if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
						// if we hit the end add newlines so we can print the comment
						// we don't do this if prev is invalid which means the
						// beginning of the file since the first comment should
						// be at the first line.
						if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
							buf.Write([]byte{newline, newline})
							newlinePrinted = true
						}

						// Write the actual comment.
						buf.WriteString(comment.Text)
						buf.WriteByte(newline)

						// Set printed to true to note that we printed something
						printed = true
					}
				}

				// If we're not at the last item, write a new line so
				// that there is a newline separating this comment from
				// the next object.
				if printed && index != len(t.Items) {
					buf.WriteByte(newline)
				}
			}

			if index == len(t.Items) {
				break
			}

			buf.Write(p.output(t.Items[index]))
			if index != len(t.Items)-1 {
				// Always write a newline to separate us from the next item
				buf.WriteByte(newline)

				// Need to determine if we're going to separate the next item
				// with a blank line. The logic here is simple, though there
				// are a few conditions:
				//
				// 1. The next object is more than one line away anyways,
				//    so we need an empty line.
				//
				// 2. The next object is not a "single line" object, so
				//    we need an empty line.
				//
				// 3. This current object is not a single line object,
				//    so we need an empty line.
				current := t.Items[index]
				next := t.Items[index+1]
				if next.Pos().Line != t.Items[index].Pos().Line+1 ||
					!p.isSingleLineObject(next) ||
					!p.isSingleLineObject(current) {
					buf.WriteByte(newline)
				}
			}
			index++
		}
	case *ast.ObjectKey:
		buf.WriteString(t.Token.Text)
	case *ast.ObjectItem:
		p.prev = t.Pos()
		buf.Write(p.objectItem(t))
	case *ast.LiteralType:
		buf.Write(p.literalType(t))
	case *ast.ListType:
		buf.Write(p.list(t))
	case *ast.ObjectType:
		buf.Write(p.objectType(t))
	default:
		fmt.Printf(" unknown type: %T\n", n)
	}

	return buf.Bytes()
}
|
||||||
|
|
||||||
|
func (p *printer) literalType(lit *ast.LiteralType) []byte {
|
||||||
|
result := []byte(lit.Token.Text)
|
||||||
|
switch lit.Token.Type {
|
||||||
|
case token.HEREDOC:
|
||||||
|
// Clear the trailing newline from heredocs
|
||||||
|
if result[len(result)-1] == '\n' {
|
||||||
|
result = result[:len(result)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Poison lines 2+ so that we don't indent them
|
||||||
|
result = p.heredocIndent(result)
|
||||||
|
case token.STRING:
|
||||||
|
// If this is a multiline string, poison lines 2+ so we don't
|
||||||
|
// indent them.
|
||||||
|
if bytes.IndexRune(result, '\n') >= 0 {
|
||||||
|
result = p.heredocIndent(result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// objectItem returns the printable HCL form of an object item. An object type
// starts with one/multiple keys and has a value. The value might be of any
// type.
func (p *printer) objectItem(o *ast.ObjectItem) []byte {
	defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
	var buf bytes.Buffer

	// Lead comments go on their own lines above the item.
	if o.LeadComment != nil {
		for _, comment := range o.LeadComment.List {
			buf.WriteString(comment.Text)
			buf.WriteByte(newline)
		}
	}

	for i, k := range o.Keys {
		buf.WriteString(k.Token.Text)
		buf.WriteByte(blank)

		// reach end of key: emit "=" only for single-key assignments
		if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
			buf.WriteString("=")
			buf.WriteByte(blank)
		}
	}

	buf.Write(p.output(o.Val))

	// A line comment is printed only when the value starts on the same
	// source line as the first key.
	if o.Val.Pos().Line == o.Keys[0].Pos().Line && o.LineComment != nil {
		buf.WriteByte(blank)
		for _, comment := range o.LineComment.List {
			buf.WriteString(comment.Text)
		}
	}

	return buf.Bytes()
}
|
||||||
|
|
||||||
|
// objectType returns the printable HCL form of an object type. An object type
// begins with a brace and ends with a brace.
func (p *printer) objectType(o *ast.ObjectType) []byte {
	defer un(trace(p, "ObjectType"))
	var buf bytes.Buffer
	buf.WriteString("{")

	var index int
	var nextItem token.Pos
	var commented, newlinePrinted bool
	for {
		// Determine the location of the next actual non-comment
		// item. If we're at the end, the next item is the closing brace
		if index != len(o.List.Items) {
			nextItem = o.List.Items[index].Pos()
		} else {
			nextItem = o.Rbrace
		}

		// Go through the standalone comments in the file and print out
		// the comments that we should be for this object item.
		for _, c := range p.standaloneComments {
			printed := false
			var lastCommentPos token.Pos
			for _, comment := range c.List {
				// We only care about comments after the previous item
				// we've printed so that comments are printed in the
				// correct locations (between two objects for example).
				// And before the next item.
				if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
					// If there are standalone comments and the initial newline has not
					// been printed yet, do it now.
					if !newlinePrinted {
						newlinePrinted = true
						buf.WriteByte(newline)
					}

					// add newline if it's between other printed nodes
					if index > 0 {
						commented = true
						buf.WriteByte(newline)
					}

					// Store this position
					lastCommentPos = comment.Pos()

					// output the comment itself
					buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))

					// Set printed to true to note that we printed something
					printed = true

					/*
						if index != len(o.List.Items) {
							buf.WriteByte(newline) // do not print on the end
						}
					*/
				}
			}

			// Stuff to do if we had comments
			if printed {
				// Always write a newline
				buf.WriteByte(newline)

				// If there is another item in the object and our comment
				// didn't hug it directly, then make sure there is a blank
				// line separating them.
				if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
					buf.WriteByte(newline)
				}
			}
		}

		if index == len(o.List.Items) {
			p.prev = o.Rbrace
			break
		}

		// At this point we are sure that it's not a totally empty block: print
		// the initial newline if it hasn't been printed yet by the previous
		// block about standalone comments.
		if !newlinePrinted {
			buf.WriteByte(newline)
			newlinePrinted = true
		}

		// check if we have adjacent one liner items. If yes we'll going to align
		// the comments.
		var aligned []*ast.ObjectItem
		for _, item := range o.List.Items[index:] {
			// we don't group one line lists
			if len(o.List.Items) == 1 {
				break
			}

			// one means a oneliner with out any lead comment
			// two means a oneliner with lead comment
			// anything else might be something else
			cur := lines(string(p.objectItem(item)))
			if cur > 2 {
				break
			}

			curPos := item.Pos()

			nextPos := token.Pos{}
			if index != len(o.List.Items)-1 {
				nextPos = o.List.Items[index+1].Pos()
			}

			prevPos := token.Pos{}
			if index != 0 {
				prevPos = o.List.Items[index-1].Pos()
			}

			// fmt.Println("DEBUG ----------------")
			// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
			// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
			// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)

			// Item directly followed by the next one: group for alignment.
			if curPos.Line+1 == nextPos.Line {
				aligned = append(aligned, item)
				index++
				continue
			}

			// Item directly preceded by the previous one: it closes the group.
			if curPos.Line-1 == prevPos.Line {
				aligned = append(aligned, item)
				index++

				// finish if we have a new line or comment next. This happens
				// if the next item is not adjacent
				if curPos.Line+1 != nextPos.Line {
					break
				}
				continue
			}

			break
		}

		// put newlines if the items are between other non aligned items.
		// newlines are also added if there is a standalone comment already, so
		// check it too
		if !commented && index != len(aligned) {
			buf.WriteByte(newline)
		}

		if len(aligned) >= 1 {
			p.prev = aligned[len(aligned)-1].Pos()

			items := p.alignedItems(aligned)
			buf.Write(p.indent(items))
		} else {
			p.prev = o.List.Items[index].Pos()

			buf.Write(p.indent(p.objectItem(o.List.Items[index])))
			index++
		}

		buf.WriteByte(newline)
	}

	buf.WriteString("}")
	return buf.Bytes()
}
|
||||||
|
|
||||||
|
// alignedItems renders a run of adjacent one-line object items with their
// "=" signs and line comments padded into aligned columns.
func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
	var buf bytes.Buffer

	// find the longest key and value length, needed for alignment
	var longestKeyLen int // longest key length
	var longestValLen int // longest value length
	for _, item := range items {
		key := len(item.Keys[0].Token.Text)
		val := len(p.output(item.Val))

		if key > longestKeyLen {
			longestKeyLen = key
		}

		if val > longestValLen {
			longestValLen = val
		}
	}

	// NOTE(review): the loop variable i is shadowed twice below (key loop
	// and padding loops); the outermost i is only read for the last-item
	// check at the bottom.
	for i, item := range items {
		if item.LeadComment != nil {
			for _, comment := range item.LeadComment.List {
				buf.WriteString(comment.Text)
				buf.WriteByte(newline)
			}
		}

		for i, k := range item.Keys {
			keyLen := len(k.Token.Text)
			buf.WriteString(k.Token.Text)
			// Pad keys to the longest key width plus one blank.
			for i := 0; i < longestKeyLen-keyLen+1; i++ {
				buf.WriteByte(blank)
			}

			// reach end of key
			if i == len(item.Keys)-1 && len(item.Keys) == 1 {
				buf.WriteString("=")
				buf.WriteByte(blank)
			}
		}

		val := p.output(item.Val)
		valLen := len(val)
		buf.Write(val)

		// Align line comments past the longest value.
		if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
			for i := 0; i < longestValLen-valLen+1; i++ {
				buf.WriteByte(blank)
			}

			for _, comment := range item.LineComment.List {
				buf.WriteString(comment.Text)
			}
		}

		// do not print for the last item
		if i != len(items)-1 {
			buf.WriteByte(newline)
		}
	}

	return buf.Bytes()
}
|
||||||
|
|
||||||
|
// list returns the printable HCL form of a list type. Single-line lists
// are emitted inline with comma+space separators; multiline lists put
// each item on its own indented line, handling lead/line comments and
// heredoc items specially.
func (p *printer) list(l *ast.ListType) []byte {
	var buf bytes.Buffer
	buf.WriteString("[")

	// Longest literal item length, used to align trailing line comments.
	var longestLine int
	for _, item := range l.List {
		// for now we assume that the list only contains literal types
		if lit, ok := item.(*ast.LiteralType); ok {
			lineLen := len(lit.Token.Text)
			if lineLen > longestLine {
				longestLine = lineLen
			}
		}
	}

	insertSpaceBeforeItem := false
	lastHadLeadComment := false
	for i, item := range l.List {
		// Keep track of whether this item is a heredoc since that has
		// unique behavior.
		heredoc := false
		if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
			heredoc = true
		}

		// An item not on the opening bracket's line means a multiline list.
		if item.Pos().Line != l.Lbrack.Line {
			// multiline list, add newline before we add each item
			buf.WriteByte(newline)
			insertSpaceBeforeItem = false

			// If we have a lead comment, then we want to write that first
			leadComment := false
			if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
				leadComment = true

				// If this isn't the first item and the previous element
				// didn't have a lead comment, then we need to add an extra
				// newline to properly space things out. If it did have a
				// lead comment previously then this would be done
				// automatically.
				if i > 0 && !lastHadLeadComment {
					buf.WriteByte(newline)
				}

				for _, comment := range lit.LeadComment.List {
					buf.Write(p.indent([]byte(comment.Text)))
					buf.WriteByte(newline)
				}
			}

			// also indent each line
			val := p.output(item)
			curLen := len(val)
			buf.Write(p.indent(val))

			// if this item is a heredoc, then we output the comma on
			// the next line. This is the only case this happens.
			comma := []byte{','}
			if heredoc {
				buf.WriteByte(newline)
				comma = p.indent(comma)
			}

			buf.Write(comma)

			if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
				// if the next item doesn't have any comments, do not align
				buf.WriteByte(blank) // align one space
				for i := 0; i < longestLine-curLen; i++ {
					buf.WriteByte(blank)
				}

				for _, comment := range lit.LineComment.List {
					buf.WriteString(comment.Text)
				}
			}

			lastItem := i == len(l.List)-1
			if lastItem {
				buf.WriteByte(newline)
			}

			if leadComment && !lastItem {
				buf.WriteByte(newline)
			}

			lastHadLeadComment = leadComment
		} else {
			if insertSpaceBeforeItem {
				buf.WriteByte(blank)
				insertSpaceBeforeItem = false
			}

			// Output the item itself
			// also indent each line
			val := p.output(item)
			curLen := len(val)
			buf.Write(val)

			// If this is a heredoc item we always have to output a newline
			// so that it parses properly.
			if heredoc {
				buf.WriteByte(newline)
			}

			// If this isn't the last element, write a comma.
			if i != len(l.List)-1 {
				buf.WriteString(",")
				insertSpaceBeforeItem = true
			}

			if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
				// if the next item doesn't have any comments, do not align
				buf.WriteByte(blank) // align one space
				for i := 0; i < longestLine-curLen; i++ {
					buf.WriteByte(blank)
				}

				for _, comment := range lit.LineComment.List {
					buf.WriteString(comment.Text)
				}
			}
		}

	}

	buf.WriteString("]")
	return buf.Bytes()
}
|
||||||
|
// indent indents the lines of the given buffer for each non-empty line.
// The prefix is cfg.SpacesWidth spaces when configured, otherwise one tab.
func (p *printer) indent(buf []byte) []byte {
	var prefix []byte
	if p.cfg.SpacesWidth != 0 {
		for i := 0; i < p.cfg.SpacesWidth; i++ {
			prefix = append(prefix, blank)
		}
	} else {
		prefix = []byte{tab}
	}

	var res []byte
	bol := true // at beginning of a line
	for _, c := range buf {
		// Prefix each line start unless the line is empty (immediate '\n').
		if bol && c != '\n' {
			res = append(res, prefix...)
		}

		res = append(res, c)
		bol = c == '\n'
	}
	return res
}
||||||
|
// unindent removes all the indentation from the tombstoned lines: any
// occurrence of the package-level unindent marker is dropped along with
// all whitespace between it and the preceding newline.
func (p *printer) unindent(buf []byte) []byte {
	var res []byte
	for i := 0; i < len(buf); i++ {
		// skip is true when position i cannot hold a marker (too close to
		// the end) or does not match one.
		skip := len(buf)-i <= len(unindent)
		if !skip {
			skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
		}
		if skip {
			res = append(res, buf[i])
			continue
		}

		// We have a marker. we have to backtrace here and clean out
		// any whitespace ahead of our tombstone up to a \n
		for j := len(res) - 1; j >= 0; j-- {
			if res[j] == '\n' {
				break
			}

			res = res[:j]
		}

		// Skip the entire unindent marker
		i += len(unindent) - 1
	}

	return res
}
||||||
|
// heredocIndent marks all the 2nd and further lines as unindentable
|
||||||
|
func (p *printer) heredocIndent(buf []byte) []byte {
|
||||||
|
var res []byte
|
||||||
|
bol := false
|
||||||
|
for _, c := range buf {
|
||||||
|
if bol && c != '\n' {
|
||||||
|
res = append(res, unindent...)
|
||||||
|
}
|
||||||
|
res = append(res, c)
|
||||||
|
bol = c == '\n'
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// isSingleLineObject tells whether the given object item is a single
// line object such as "obj {}".
//
// A single line object:
//
//   * has no lead comments (hence multi-line)
//   * has no assignment
//   * has no values in the stanza (within {})
//
func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
	// If there is a lead comment, can't be one line
	if val.LeadComment != nil {
		return false
	}

	// If there is assignment, we always break by line
	if val.Assign.IsValid() {
		return false
	}

	// If it isn't an object type, then it's not a single line object
	ot, ok := val.Val.(*ast.ObjectType)
	if !ok {
		return false
	}

	// If the object has no items, it is single line!
	return len(ot.List.Items) == 0
}
|
||||||
|
// lines reports how many lines txt spans: one more than the number of
// newline bytes it contains (so the empty string counts as one line).
func lines(txt string) int {
	count := 1
	for _, b := range []byte(txt) {
		if b == '\n' {
			count++
		}
	}
	return count
}
||||||
|
// ----------------------------------------------------------------------------
// Tracing support

// printTrace prints the arguments prefixed by dots reflecting the current
// trace indentation depth. It is a no-op unless p.enableTrace is set.
func (p *printer) printTrace(a ...interface{}) {
	if !p.enableTrace {
		return
	}

	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	i := 2 * p.indentTrace
	// Emit whole dot banners until the remainder fits in one.
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}
|
||||||
|
// trace prints msg with an opening marker and increases the trace
// indentation. It returns p so it composes with un in a defer.
func trace(p *printer, msg string) *printer {
	p.printTrace(msg, "(")
	p.indentTrace++
	return p
}
||||||
|
// un decreases the trace indentation and prints the closing marker.
// Usage pattern: defer un(trace(p, "..."))
func un(p *printer) {
	p.indentTrace--
	p.printTrace(")")
}
|
66
vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
generated
vendored
Normal file
66
vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
// Package printer implements printing of AST nodes to HCL format.
|
||||||
|
package printer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"text/tabwriter"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/hcl/ast"
|
||||||
|
"github.com/hashicorp/hcl/hcl/parser"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultConfig is the configuration used by the package-level Fprint
// and Format helpers: two-space indentation.
var DefaultConfig = Config{
	SpacesWidth: 2,
}

// A Config node controls the output of Fprint.
type Config struct {
	SpacesWidth int // if set, it will use spaces instead of tabs for alignment
}
|
||||||
|
// Fprint pretty-prints node to output using the receiver's settings.
// If output is a *tabwriter.Writer it is flushed before returning.
func (c *Config) Fprint(output io.Writer, node ast.Node) error {
	p := &printer{
		cfg:                *c,
		comments:           make([]*ast.CommentGroup, 0),
		standaloneComments: make([]*ast.CommentGroup, 0),
		// enableTrace: true,
	}

	p.collectComments(node)

	// Render, then strip heredoc unindent tombstones before writing.
	if _, err := output.Write(p.unindent(p.output(node))); err != nil {
		return err
	}

	// flush tabwriter, if any
	var err error
	if tw, _ := output.(*tabwriter.Writer); tw != nil {
		err = tw.Flush()
	}

	return err
}
||||||
|
|
||||||
|
// Fprint "pretty-prints" an HCL node to output.
// It calls Config.Fprint with default settings (DefaultConfig).
func Fprint(output io.Writer, node ast.Node) error {
	return DefaultConfig.Fprint(output, node)
}
|
||||||
|
// Format formats src HCL and returns the result. It parses src, prints
// it with DefaultConfig, and appends a trailing newline. A parse or
// print failure is returned unchanged.
func Format(src []byte) ([]byte, error) {
	node, err := parser.Parse(src)
	if err != nil {
		return nil, err
	}

	var buf bytes.Buffer
	if err := DefaultConfig.Fprint(&buf, node); err != nil {
		return nil, err
	}

	// Add trailing newline to result
	buf.WriteString("\n")
	return buf.Bytes(), nil
}
|
651
vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
generated
vendored
Normal file
651
vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
generated
vendored
Normal file
@ -0,0 +1,651 @@
|
|||||||
|
// Package scanner implements a scanner for HCL (HashiCorp Configuration
|
||||||
|
// Language) source text.
|
||||||
|
package scanner
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/hcl/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// eof represents a marker rune for the end of the reader.
const eof = rune(0)

// Scanner defines a lexical scanner
type Scanner struct {
	buf *bytes.Buffer // Source buffer for advancing and scanning
	src []byte        // Source buffer for immutable access

	// Source Position
	srcPos  token.Pos // current position
	prevPos token.Pos // previous position, used for peek() method

	lastCharLen int // length of last character in bytes
	lastLineLen int // length of last line in characters (for correct column reporting)

	// tokStart/tokEnd delimit the most recently scanned token's text
	// within src.
	tokStart int // token text start position
	tokEnd   int // token text end position

	// Error is called for each error encountered. If no Error
	// function is set, the error is reported to os.Stderr.
	Error func(pos token.Pos, msg string)

	// ErrorCount is incremented by one for each error encountered.
	ErrorCount int

	// tokPos is the start position of most recently scanned token; set by
	// Scan. The Filename field is always left untouched by the Scanner. If
	// an error is reported (via Error) and Position is invalid, the scanner is
	// not inside a token.
	tokPos token.Pos
}
|
|
||||||
|
// New creates and initializes a new instance of Scanner using src as
// its source content.
func New(src []byte) *Scanner {
	// even though we accept a src, we read from a io.Reader compatible type
	// (*bytes.Buffer). So in the future we might easily change it to streaming
	// read.
	b := bytes.NewBuffer(src)
	s := &Scanner{
		buf: b,
		src: src,
	}

	// srcPosition always starts with 1
	s.srcPos.Line = 1
	return s
}
||||||
|
|
||||||
|
// next reads the next rune from the buffered reader and advances the
// source position bookkeeping. Returns the rune(0) eof marker if an
// error occurs (or io.EOF is returned).
func (s *Scanner) next() rune {
	ch, size, err := s.buf.ReadRune()
	if err != nil {
		// advance for error reporting
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		return eof
	}

	// A RuneError of size 1 means invalid UTF-8 input, not a literal
	// replacement character in the source.
	if ch == utf8.RuneError && size == 1 {
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		s.err("illegal UTF-8 encoding")
		return ch
	}

	// remember last position
	s.prevPos = s.srcPos

	s.srcPos.Column++
	s.lastCharLen = size
	s.srcPos.Offset += size

	if ch == '\n' {
		s.srcPos.Line++
		s.lastLineLen = s.srcPos.Column
		s.srcPos.Column = 0
	}

	// If we see a null character with data left, then that is an error
	if ch == '\x00' && s.buf.Len() > 0 {
		s.err("unexpected null character (0x00)")
		return eof
	}

	// debug
	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
	return ch
}
|
|
||||||
|
// unread unreads the previous read Rune and updates the source position.
// Calling it without a preceding successful ReadRune is a caller bug and
// panics.
func (s *Scanner) unread() {
	if err := s.buf.UnreadRune(); err != nil {
		panic(err) // this is user fault, we should catch it
	}
	s.srcPos = s.prevPos // put back last position
}
||||||
|
|
||||||
|
// peek returns the next rune without advancing the reader.
// Returns eof if the reader is exhausted.
func (s *Scanner) peek() rune {
	peek, _, err := s.buf.ReadRune()
	if err != nil {
		return eof
	}

	// UnreadRune cannot fail here: the ReadRune above just succeeded.
	s.buf.UnreadRune()
	return peek
}
||||||
|
|
||||||
|
// Scan scans the next token and returns the token. Whitespace is
// skipped; the returned token carries its type, position, and literal
// text sliced out of the original source.
func (s *Scanner) Scan() token.Token {
	ch := s.next()

	// skip white space
	for isWhitespace(ch) {
		ch = s.next()
	}

	var tok token.Type

	// token text markings
	s.tokStart = s.srcPos.Offset - s.lastCharLen

	// token position, initial next() is moving the offset by one(size of rune
	// actually), though we are interested with the starting point
	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
	if s.srcPos.Column > 0 {
		// common case: last character was not a '\n'
		s.tokPos.Line = s.srcPos.Line
		s.tokPos.Column = s.srcPos.Column
	} else {
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		s.tokPos.Line = s.srcPos.Line - 1
		s.tokPos.Column = s.lastLineLen
	}

	switch {
	case isLetter(ch):
		tok = token.IDENT
		lit := s.scanIdentifier()
		if lit == "true" || lit == "false" {
			tok = token.BOOL
		}
	case isDecimal(ch):
		tok = s.scanNumber(ch)
	default:
		switch ch {
		case eof:
			tok = token.EOF
		case '"':
			tok = token.STRING
			s.scanString()
		case '#', '/':
			tok = token.COMMENT
			s.scanComment(ch)
		case '.':
			tok = token.PERIOD
			ch = s.peek()
			if isDecimal(ch) {
				// a leading-dot float such as .5
				tok = token.FLOAT
				ch = s.scanMantissa(ch)
				ch = s.scanExponent(ch)
			}
		case '<':
			tok = token.HEREDOC
			s.scanHeredoc()
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case '{':
			tok = token.LBRACE
		case '}':
			tok = token.RBRACE
		case ',':
			tok = token.COMMA
		case '=':
			tok = token.ASSIGN
		case '+':
			tok = token.ADD
		case '-':
			// '-' followed by a digit starts a negative number literal
			if isDecimal(s.peek()) {
				ch := s.next()
				tok = s.scanNumber(ch)
			} else {
				tok = token.SUB
			}
		default:
			s.err("illegal char")
		}
	}

	// finish token ending
	s.tokEnd = s.srcPos.Offset

	// create token literal
	var tokenText string
	if s.tokStart >= 0 {
		tokenText = string(s.src[s.tokStart:s.tokEnd])
	}
	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call

	return token.Token{
		Type: tok,
		Pos:  s.tokPos,
		Text: tokenText,
	}
}
||||||
|
|
||||||
|
// scanComment consumes a comment starting at ch: '#' or '//' single-line
// comments up to (but not including) the newline, or '/* ... */' block
// comments. An unterminated block comment is reported via s.err.
func (s *Scanner) scanComment(ch rune) {
	// single line comments
	if ch == '#' || (ch == '/' && s.peek() != '*') {
		if ch == '/' && s.peek() != '/' {
			s.err("expected '/' for comment")
			return
		}

		ch = s.next()
		for ch != '\n' && ch >= 0 && ch != eof {
			ch = s.next()
		}
		// put the terminating newline back so it is scanned normally
		if ch != eof && ch >= 0 {
			s.unread()
		}
		return
	}

	// be sure we get the character after /* This allows us to find comments
	// that are not terminated
	if ch == '/' {
		s.next()
		ch = s.next() // read character after "/*"
	}

	// look for /* - style comments
	for {
		if ch < 0 || ch == eof {
			s.err("comment not terminated")
			break
		}

		ch0 := ch
		ch = s.next()
		if ch0 == '*' && ch == '/' {
			break
		}
	}
}
||||||
|
|
||||||
|
// scanNumber scans a HCL number definition starting with the given rune.
// It distinguishes hexadecimal (0x..), octal (0..), decimal, and float
// (fraction and/or exponent) forms, returning token.NUMBER or token.FLOAT.
func (s *Scanner) scanNumber(ch rune) token.Type {
	if ch == '0' {
		// check for hexadecimal, octal or float
		ch = s.next()
		if ch == 'x' || ch == 'X' {
			// hexadecimal
			ch = s.next()
			found := false
			for isHexadecimal(ch) {
				ch = s.next()
				found = true
			}

			if !found {
				s.err("illegal hexadecimal number")
			}

			if ch != eof {
				s.unread()
			}

			return token.NUMBER
		}

		// now it's either something like: 0421(octal) or 0.1231(float)
		illegalOctal := false
		for isDecimal(ch) {
			ch = s.next()
			if ch == '8' || ch == '9' {
				// this is just a possibility. For example 0159 is illegal, but
				// 0159.23 is valid. So we mark a possible illegal octal. If
				// the next character is not a period, we'll print the error.
				illegalOctal = true
			}
		}

		if ch == 'e' || ch == 'E' {
			ch = s.scanExponent(ch)
			return token.FLOAT
		}

		if ch == '.' {
			ch = s.scanFraction(ch)

			if ch == 'e' || ch == 'E' {
				ch = s.next()
				ch = s.scanExponent(ch)
			}
			return token.FLOAT
		}

		if illegalOctal {
			s.err("illegal octal number")
		}

		if ch != eof {
			s.unread()
		}
		return token.NUMBER
	}

	// Non-zero leading digit: plain decimal, possibly float.
	s.scanMantissa(ch)
	ch = s.next() // seek forward
	if ch == 'e' || ch == 'E' {
		ch = s.scanExponent(ch)
		return token.FLOAT
	}

	if ch == '.' {
		ch = s.scanFraction(ch)
		if ch == 'e' || ch == 'E' {
			ch = s.next()
			ch = s.scanExponent(ch)
		}
		return token.FLOAT
	}

	if ch != eof {
		s.unread()
	}
	return token.NUMBER
}
|
|
||||||
|
// scanMantissa scans the mantissa beginning from the rune. It returns the next
// non decimal rune. It's used to determine whether it's a fraction or exponent.
func (s *Scanner) scanMantissa(ch rune) rune {
	scanned := false
	for isDecimal(ch) {
		ch = s.next()
		scanned = true
	}

	// Put the first non-decimal rune back for the caller, but only if at
	// least one digit was consumed.
	if scanned && ch != eof {
		s.unread()
	}
	return ch
}
||||||
|
|
||||||
|
// scanFraction scans the fraction digits after the '.' rune and returns
// the first rune following them.
func (s *Scanner) scanFraction(ch rune) rune {
	if ch == '.' {
		ch = s.peek() // we peek just to see if we can move forward
		ch = s.scanMantissa(ch)
	}
	return ch
}
||||||
|
|
||||||
|
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
// rune: an optional sign followed by the exponent digits.
func (s *Scanner) scanExponent(ch rune) rune {
	if ch == 'e' || ch == 'E' {
		ch = s.next()
		// optional sign
		if ch == '-' || ch == '+' {
			ch = s.next()
		}
		ch = s.scanMantissa(ch)
	}
	return ch
}
||||||
|
|
||||||
|
// scanHeredoc scans a heredoc string: the "<<ANCHOR" (or indented
// "<<-ANCHOR") opener, then body lines until a line matching the anchor
// terminates it. Errors are reported via s.err.
func (s *Scanner) scanHeredoc() {
	// Scan the second '<' in example: '<<EOF'
	if s.next() != '<' {
		s.err("heredoc expected second '<', didn't see it")
		return
	}

	// Get the original offset so we can read just the heredoc ident
	offs := s.srcPos.Offset

	// Scan the identifier
	ch := s.next()

	// Indented heredoc syntax
	if ch == '-' {
		ch = s.next()
	}

	for isLetter(ch) || isDigit(ch) {
		ch = s.next()
	}

	// If we reached an EOF then that is not good
	if ch == eof {
		s.err("heredoc not terminated")
		return
	}

	// Ignore the '\r' in Windows line endings
	if ch == '\r' {
		if s.peek() == '\n' {
			ch = s.next()
		}
	}

	// If we didn't reach a newline then that is also not good
	if ch != '\n' {
		s.err("invalid characters in heredoc anchor")
		return
	}

	// Read the identifier
	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
	if len(identBytes) == 0 {
		s.err("zero-length heredoc anchor")
		return
	}

	// For the indented ("<<-") form the terminator may be preceded by
	// whitespace, so match with an optional leading-space regexp.
	var identRegexp *regexp.Regexp
	if identBytes[0] == '-' {
		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
	} else {
		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
	}

	// Read the actual string value
	lineStart := s.srcPos.Offset
	for {
		ch := s.next()

		// Special newline handling.
		if ch == '\n' {
			// Math is fast, so we first compare the byte counts to see if we have a chance
			// of seeing the same identifier - if the length is less than the number of bytes
			// in the identifier, this cannot be a valid terminator.
			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
			if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
				break
			}

			// Not an anchor match, record the start of a new line
			lineStart = s.srcPos.Offset
		}

		if ch == eof {
			s.err("heredoc not terminated")
			return
		}
	}

	return
}
||||||
|
|
||||||
|
// scanString scans a quoted string whose opening '"' has already been
// consumed. Interpolation sections (${ ... }) are tracked with a brace
// counter so quotes and newlines inside them do not end the string.
func (s *Scanner) scanString() {
	braces := 0
	for {
		// '"' opening already consumed
		// read character after quote
		ch := s.next()

		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
			s.err("literal not terminated")
			return
		}

		if ch == '"' && braces == 0 {
			break
		}

		// If we're going into a ${} then we can ignore quotes for awhile
		if braces == 0 && ch == '$' && s.peek() == '{' {
			braces++
			s.next()
		} else if braces > 0 && ch == '{' {
			braces++
		}
		if braces > 0 && ch == '}' {
			braces--
		}

		if ch == '\\' {
			s.scanEscape()
		}
	}

	return
}
||||||
|
|
||||||
|
// scanEscape scans an escape sequence after the '\\' has been consumed
// and returns the last rune read.
func (s *Scanner) scanEscape() rune {
	// http://en.cppreference.com/w/cpp/language/escape
	ch := s.next() // read character after '/'
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
		// nothing to do
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// octal notation
		ch = s.scanDigits(ch, 8, 3)
	case 'x':
		// hexadecimal notation
		ch = s.scanDigits(s.next(), 16, 2)
	case 'u':
		// universal character name
		ch = s.scanDigits(s.next(), 16, 4)
	case 'U':
		// universal character name
		ch = s.scanDigits(s.next(), 16, 8)
	default:
		s.err("illegal char escape")
	}
	return ch
}
||||||
|
|
||||||
|
// scanDigits scans a rune with the given base for n times. For example an
// octal notation \184 would yield in scanDigits(ch, 8, 3). An error is
// reported when fewer than n valid digits are found.
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
	start := n
	for n > 0 && digitVal(ch) < base {
		ch = s.next()
		if ch == eof {
			// If we see an EOF, we halt any more scanning of digits
			// immediately.
			break
		}

		n--
	}
	if n > 0 {
		s.err("illegal char escape")
	}

	if n != start {
		// we scanned all digits, put the last non digit char back,
		// only if we read anything at all
		s.unread()
	}

	return ch
}
||||||
|
|
||||||
|
// scanIdentifier scans an identifier and returns the literal string.
// Identifiers may contain letters, digits, '-' and '.'.
func (s *Scanner) scanIdentifier() string {
	offs := s.srcPos.Offset - s.lastCharLen
	ch := s.next()
	for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
		ch = s.next()
	}

	if ch != eof {
		s.unread() // we got identifier, put back latest char
	}

	return string(s.src[offs:s.srcPos.Offset])
}
||||||
|
|
||||||
|
// recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos token.Pos) {
	pos.Offset = s.srcPos.Offset - s.lastCharLen
	switch {
	case s.srcPos.Column > 0:
		// common case: last character was not a '\n'
		pos.Line = s.srcPos.Line
		pos.Column = s.srcPos.Column
	case s.lastLineLen > 0:
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		pos.Line = s.srcPos.Line - 1
		pos.Column = s.lastLineLen
	default:
		// at the beginning of the source
		pos.Line = 1
		pos.Column = 1
	}
	return
}
||||||
|
|
||||||
|
// err reports a scanning error at the most recent position. It increments
// ErrorCount and delivers the message to s.Error if set, otherwise to
// os.Stderr.
func (s *Scanner) err(msg string) {
	s.ErrorCount++
	pos := s.recentPosition()

	if s.Error != nil {
		s.Error(pos, msg)
		return
	}

	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}
||||||
|
|
||||||
|
// isLetter reports whether ch can start an identifier: an ASCII letter,
// an underscore, or a non-ASCII Unicode letter.
func isLetter(ch rune) bool {
	switch {
	case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z':
		return true
	case ch == '_':
		return true
	default:
		return ch >= 0x80 && unicode.IsLetter(ch)
	}
}
||||||
|
|
||||||
|
// isDigit reports whether ch is a decimal digit, including non-ASCII
// Unicode digits.
func isDigit(ch rune) bool {
	if '0' <= ch && ch <= '9' {
		return true
	}
	return ch >= 0x80 && unicode.IsDigit(ch)
}
||||||
|
|
||||||
|
// isDecimal reports whether ch is an ASCII decimal digit ('0'..'9').
func isDecimal(ch rune) bool {
	return ch >= '0' && ch <= '9'
}
||||||
|
|
||||||
|
// isHexadecimal reports whether ch is a hexadecimal digit
// (0-9, a-f, A-F).
func isHexadecimal(ch rune) bool {
	switch {
	case ch >= '0' && ch <= '9':
		return true
	case ch >= 'a' && ch <= 'f':
		return true
	case ch >= 'A' && ch <= 'F':
		return true
	}
	return false
}
||||||
|
|
||||||
|
// isWhitespace reports whether ch is a space, tab, newline or carriage
// return.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
||||||
|
|
||||||
|
// digitVal returns the integer value of a given octal, decimal or
// hexadecimal rune, or 16 (larger than any legal digit value) when the
// rune is not a digit.
func digitVal(ch rune) int {
	if '0' <= ch && ch <= '9' {
		return int(ch - '0')
	}
	if 'a' <= ch && ch <= 'f' {
		return int(ch-'a') + 10
	}
	if 'A' <= ch && ch <= 'F' {
		return int(ch-'A') + 10
	}
	return 16 // larger than any legal digit val
}
|
241
vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
generated
vendored
Normal file
241
vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
generated
vendored
Normal file
@ -0,0 +1,241 @@
|
|||||||
|
package strconv
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrSyntax indicates that a value does not have the right syntax for the target type.
|
||||||
|
var ErrSyntax = errors.New("invalid syntax")
|
||||||
|
|
||||||
|
// Unquote interprets s as a quoted HCL string literal, returning the
// string value that s quotes.
//
// NOTE(review): despite the upstream wording borrowed from Go's strconv
// (single-/backquoted literals), this implementation only accepts
// double-quoted strings — see the quote != '"' check below. HCL
// interpolation sequences `${...}` are copied through verbatim without
// unescaping.
func Unquote(s string) (t string, err error) {
	n := len(s)
	if n < 2 {
		return "", ErrSyntax
	}
	quote := s[0]
	if quote != s[n-1] {
		// opening and closing quote characters must match
		return "", ErrSyntax
	}
	s = s[1 : n-1] // strip the surrounding quotes

	// Only double-quoted strings are supported.
	if quote != '"' {
		return "", ErrSyntax
	}

	// A raw newline is rejected only when the string contains neither '$'
	// nor '{' (i.e. cannot contain an interpolation).
	if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
		return "", ErrSyntax
	}

	// Is it trivial? Avoid allocation.
	if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
		switch quote {
		case '"':
			return s, nil
		case '\'':
			r, size := utf8.DecodeRuneInString(s)
			if size == len(s) && (r != utf8.RuneError || size != 1) {
				return s, nil
			}
		}
	}

	var runeTmp [utf8.UTFMax]byte
	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
	for len(s) > 0 {
		// If we're starting a '${}' then let it through un-unquoted.
		// Specifically: we don't unquote any characters within the `${}`
		// section.
		if s[0] == '$' && len(s) > 1 && s[1] == '{' {
			buf = append(buf, '$', '{')
			s = s[2:]

			// Continue reading until we find the closing brace, copying as-is.
			// Nested braces are tracked so `${a{b}c}` is consumed whole.
			braces := 1
			for len(s) > 0 && braces > 0 {
				r, size := utf8.DecodeRuneInString(s)
				if r == utf8.RuneError {
					return "", ErrSyntax
				}

				s = s[size:]

				n := utf8.EncodeRune(runeTmp[:], r)
				buf = append(buf, runeTmp[:n]...)

				switch r {
				case '{':
					braces++
				case '}':
					braces--
				}
			}
			if braces != 0 {
				// unterminated interpolation
				return "", ErrSyntax
			}
			if len(s) == 0 {
				// If there's no string left, we're done!
				break
			} else {
				// If there's more left, we need to pop back up to the top of the loop
				// in case there's another interpolation in this string.
				continue
			}
		}

		// A newline outside an interpolation is always an error here.
		if s[0] == '\n' {
			return "", ErrSyntax
		}

		c, multibyte, ss, err := unquoteChar(s, quote)
		if err != nil {
			return "", err
		}
		s = ss
		if c < utf8.RuneSelf || !multibyte {
			buf = append(buf, byte(c))
		} else {
			n := utf8.EncodeRune(runeTmp[:], c)
			buf = append(buf, runeTmp[:n]...)
		}
		if quote == '\'' && len(s) != 0 {
			// single-quoted must be single character
			return "", ErrSyntax
		}
	}
	return string(buf), nil
}
|
||||||
|
|
||||||
|
// contains reports whether the string contains the byte c.
// The comparison is byte-wise, not rune-wise.
func contains(s string, c byte) bool {
	for _, b := range []byte(s) {
		if b == c {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// unhex converts a single hexadecimal digit byte into its numeric value.
// ok is false when b is not a valid hex digit, in which case v is 0.
func unhex(b byte) (v rune, ok bool) {
	c := rune(b)
	if c >= '0' && c <= '9' {
		return c - '0', true
	}
	if c >= 'a' && c <= 'f' {
		return c - 'a' + 10, true
	}
	if c >= 'A' && c <= 'F' {
		return c - 'A' + 10, true
	}
	return 0, false
}
|
||||||
|
|
||||||
|
// unquoteChar decodes the first character or byte of the escaped string s.
// It returns:
//
//	value     - the decoded Unicode code point or byte value
//	multibyte - whether the decoded value requires a multibyte UTF-8 encoding
//	tail      - the remainder of s after the consumed character
//	err       - ErrSyntax when the escape sequence is malformed
//
// quote is the surrounding quote character and controls which quote
// escapes (\' or \") are legal.
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
	// easy cases
	switch c := s[0]; {
	case c == quote && (quote == '\'' || quote == '"'):
		// a bare, unescaped quote character is a syntax error
		err = ErrSyntax
		return
	case c >= utf8.RuneSelf:
		// non-ASCII: decode a full rune as-is
		r, size := utf8.DecodeRuneInString(s)
		return r, true, s[size:], nil
	case c != '\\':
		// plain ASCII byte, no escape
		return rune(s[0]), false, s[1:], nil
	}

	// hard case: c is backslash
	if len(s) <= 1 {
		// trailing backslash with nothing to escape
		err = ErrSyntax
		return
	}
	c := s[1]
	s = s[2:]

	switch c {
	case 'a':
		value = '\a'
	case 'b':
		value = '\b'
	case 'f':
		value = '\f'
	case 'n':
		value = '\n'
	case 'r':
		value = '\r'
	case 't':
		value = '\t'
	case 'v':
		value = '\v'
	case 'x', 'u', 'U':
		// \xHH, \uHHHH, \UHHHHHHHH — n is the number of hex digits expected
		n := 0
		switch c {
		case 'x':
			n = 2
		case 'u':
			n = 4
		case 'U':
			n = 8
		}
		var v rune
		if len(s) < n {
			err = ErrSyntax
			return
		}
		for j := 0; j < n; j++ {
			x, ok := unhex(s[j])
			if !ok {
				err = ErrSyntax
				return
			}
			v = v<<4 | x
		}
		s = s[n:]
		if c == 'x' {
			// single-byte string, possibly not UTF-8
			value = v
			break
		}
		// \u / \U must encode a valid Unicode code point
		if v > utf8.MaxRune {
			err = ErrSyntax
			return
		}
		value = v
		multibyte = true
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// octal escape: exactly three octal digits, max value 255
		v := rune(c) - '0'
		if len(s) < 2 {
			err = ErrSyntax
			return
		}
		for j := 0; j < 2; j++ { // one digit already; two more
			x := rune(s[j]) - '0'
			if x < 0 || x > 7 {
				err = ErrSyntax
				return
			}
			v = (v << 3) | x
		}
		s = s[2:]
		if v > 255 {
			err = ErrSyntax
			return
		}
		value = v
	case '\\':
		value = '\\'
	case '\'', '"':
		// an escaped quote is only valid for the active quote character
		if c != quote {
			err = ErrSyntax
			return
		}
		value = rune(c)
	default:
		err = ErrSyntax
		return
	}
	tail = s
	return
}
|
46
vendor/github.com/hashicorp/hcl/hcl/token/position.go
generated
vendored
Normal file
46
vendor/github.com/hashicorp/hcl/hcl/token/position.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
package token
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
type Pos struct {
	Filename string // filename, if any
	Offset   int    // byte offset within the source, starting at 0
	Column   int    // column number, starting at 1 (character count)
	Line     int    // line number, starting at 1
	// NOTE(review): field doc order restored from upstream; Offset is
	// advanced in byte units by the scanner — confirm against Scanner.next.
}
|
||||||
|
|
||||||
|
// IsValid returns true if the position is valid.
// A position is valid when its line number has been set (> 0);
// the zero Pos is invalid.
func (p *Pos) IsValid() bool { return p.Line > 0 }
|
||||||
|
|
||||||
|
// String returns a string in one of several forms:
|
||||||
|
//
|
||||||
|
// file:line:column valid position with file name
|
||||||
|
// line:column valid position without file name
|
||||||
|
// file invalid position with file name
|
||||||
|
// - invalid position without file name
|
||||||
|
func (p Pos) String() string {
|
||||||
|
s := p.Filename
|
||||||
|
if p.IsValid() {
|
||||||
|
if s != "" {
|
||||||
|
s += ":"
|
||||||
|
}
|
||||||
|
s += fmt.Sprintf("%d:%d", p.Line, p.Column)
|
||||||
|
}
|
||||||
|
if s == "" {
|
||||||
|
s = "-"
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Before reports whether the position p is before u.
// NOTE(review): this ORs the Offset and Line comparisons rather than
// comparing a single field; for positions with consistent Offset/Line
// pairs from the same source this is equivalent to an offset compare,
// but inconsistent inputs may compare unexpectedly — confirm upstream intent.
func (p Pos) Before(u Pos) bool {
	return u.Offset > p.Offset || u.Line > p.Line
}
|
||||||
|
|
||||||
|
// After reports whether the position p is after u.
// NOTE(review): mirror of Before — ORs Offset and Line comparisons;
// see the caveat on Before about inconsistent positions.
func (p Pos) After(u Pos) bool {
	return u.Offset < p.Offset || u.Line < p.Line
}
|
219
vendor/github.com/hashicorp/hcl/hcl/token/token.go
generated
vendored
Normal file
219
vendor/github.com/hashicorp/hcl/hcl/token/token.go
generated
vendored
Normal file
@ -0,0 +1,219 @@
|
|||||||
|
// Package token defines constants representing the lexical tokens for HCL
|
||||||
|
// (HashiCorp Configuration Language)
|
||||||
|
package token
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
	Type Type   // lexical class of the token
	Pos  Pos    // position of the token's first character
	Text string // literal text as it appeared in the source
	JSON bool   // true when scanned from JSON; selects strconv.Unquote in Value
}
|
||||||
|
|
||||||
|
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int

// The unexported *_beg/*_end constants are sentinels that delimit the
// identifier, literal, and operator ranges; IsIdentifier, IsLiteral and
// IsOperator test membership by comparing against them. Do not reorder.
const (
	// Special tokens
	ILLEGAL Type = iota
	EOF
	COMMENT

	identifier_beg
	IDENT // literals
	literal_beg
	NUMBER  // 12345
	FLOAT   // 123.45
	BOOL    // true,false
	STRING  // "abc"
	HEREDOC // <<FOO\nbar\nFOO
	literal_end
	identifier_end

	operator_beg
	LBRACK // [
	LBRACE // {
	COMMA  // ,
	PERIOD // .

	RBRACK // ]
	RBRACE // }

	ASSIGN // =
	ADD    // +
	SUB    // -
	operator_end
)
|
||||||
|
|
||||||
|
// tokens maps each Type to its display name, indexed by the Type's
// ordinal value. Sentinel types (identifier_beg, literal_beg, ...) are
// intentionally absent and render as empty strings.
var tokens = [...]string{
	ILLEGAL: "ILLEGAL",

	EOF:     "EOF",
	COMMENT: "COMMENT",

	IDENT:  "IDENT",
	NUMBER: "NUMBER",
	FLOAT:  "FLOAT",
	BOOL:   "BOOL",
	STRING: "STRING",

	LBRACK:  "LBRACK",
	LBRACE:  "LBRACE",
	COMMA:   "COMMA",
	PERIOD:  "PERIOD",
	HEREDOC: "HEREDOC",

	RBRACK: "RBRACK",
	RBRACE: "RBRACE",

	ASSIGN: "ASSIGN",
	ADD:    "ADD",
	SUB:    "SUB",
}
|
||||||
|
|
||||||
|
// String returns the string corresponding to the token tok.
|
||||||
|
func (t Type) String() string {
|
||||||
|
s := ""
|
||||||
|
if 0 <= t && t < Type(len(tokens)) {
|
||||||
|
s = tokens[t]
|
||||||
|
}
|
||||||
|
if s == "" {
|
||||||
|
s = "token(" + strconv.Itoa(int(t)) + ")"
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsIdentifier returns true for tokens corresponding to identifiers and basic
// type literals; it returns false otherwise.
// Membership is tested against the identifier_beg/identifier_end sentinels.
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
|
||||||
|
|
||||||
|
// IsLiteral returns true for tokens corresponding to basic type literals; it
// returns false otherwise.
// Membership is tested against the literal_beg/literal_end sentinels.
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
|
||||||
|
|
||||||
|
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
// Membership is tested against the operator_beg/operator_end sentinels.
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
|
||||||
|
|
||||||
|
// String returns the token's literal text. Note that this is only
|
||||||
|
// applicable for certain token types, such as token.IDENT,
|
||||||
|
// token.STRING, etc..
|
||||||
|
func (t Token) String() string {
|
||||||
|
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the properly typed value for this token. The type of
// the returned interface{} is guaranteed based on the Type field.
//
// This can only be called for literal types. If it is called for any other
// type, this will panic. It also panics on malformed literal text
// (e.g. an unparseable NUMBER or an unknown BOOL spelling).
func (t Token) Value() interface{} {
	switch t.Type {
	case BOOL:
		if t.Text == "true" {
			return true
		} else if t.Text == "false" {
			return false
		}

		panic("unknown bool value: " + t.Text)
	case FLOAT:
		v, err := strconv.ParseFloat(t.Text, 64)
		if err != nil {
			panic(err)
		}

		return float64(v)
	case NUMBER:
		// base 0: accepts decimal, 0x hex, and 0-prefixed octal forms
		v, err := strconv.ParseInt(t.Text, 0, 64)
		if err != nil {
			panic(err)
		}

		return int64(v)
	case IDENT:
		return t.Text
	case HEREDOC:
		return unindentHeredoc(t.Text)
	case STRING:
		// Determine the Unquote method to use. If it came from JSON,
		// then we need to use the built-in unquote since we have to
		// escape interpolations there.
		f := hclstrconv.Unquote
		if t.JSON {
			f = strconv.Unquote
		}

		// This case occurs if json null is used
		if t.Text == "" {
			return ""
		}

		v, err := f(t.Text)
		if err != nil {
			panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
		}

		return v
	default:
		panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
	}
}
|
||||||
|
|
||||||
|
// unindentHeredoc returns the string content of a HEREDOC if it is started with <<
// and the content of a HEREDOC with the hanging indent removed if it is started with
// a <<-, and the terminating line is at least as indented as the least indented line.
//
// NOTE(review): the slice arithmetic (len(heredoc)-idx+1 / -idx+2) appears to
// assume the opening marker line and the closing marker have related lengths;
// this matches upstream HCL but is fragile — do not "simplify" without tests.
func unindentHeredoc(heredoc string) string {
	// We need to find the end of the marker
	idx := strings.IndexByte(heredoc, '\n')
	if idx == -1 {
		panic("heredoc doesn't contain newline")
	}

	// heredoc[2] is the character after "<<": '-' marks an indented heredoc
	unindent := heredoc[2] == '-'

	// We can optimize if the heredoc isn't marked for indentation
	if !unindent {
		return string(heredoc[idx+1 : len(heredoc)-idx+1])
	}

	// We need to unindent each line based on the indentation level of the marker
	lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
	// the final line holds the whitespace that precedes the closing marker
	whitespacePrefix := lines[len(lines)-1]

	isIndented := true
	for _, v := range lines {
		if strings.HasPrefix(v, whitespacePrefix) {
			continue
		}

		isIndented = false
		break
	}

	// If all lines are not at least as indented as the terminating mark, return the
	// heredoc as is, but trim the leading space from the marker on the final line.
	if !isIndented {
		return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
	}

	unindentedLines := make([]string, len(lines))
	for k, v := range lines {
		if k == len(lines)-1 {
			// drop the marker's own indentation entirely
			unindentedLines[k] = ""
			break
		}

		unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
	}

	return strings.Join(unindentedLines, "\n")
}
|
117
vendor/github.com/hashicorp/hcl/json/parser/flatten.go
generated
vendored
Normal file
117
vendor/github.com/hashicorp/hcl/json/parser/flatten.go
generated
vendored
Normal file
@ -0,0 +1,117 @@
|
|||||||
|
package parser
|
||||||
|
|
||||||
|
import "github.com/hashicorp/hcl/hcl/ast"
|
||||||
|
|
||||||
|
// flattenObjects takes an AST node, walks it, and flattens nested
// ObjectType/ListType values inside every ObjectList into a flat item
// list with merged keys, so JSON-derived ASTs resemble native HCL ones.
func flattenObjects(node ast.Node) {
	ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
		// We only care about lists, because this is what we modify
		list, ok := n.(*ast.ObjectList)
		if !ok {
			return n, true
		}

		// Rebuild the item list using an explicit work stack ("frontier")
		items := make([]*ast.ObjectItem, 0, len(list.Items))
		frontier := make([]*ast.ObjectItem, len(list.Items))
		copy(frontier, list.Items)
		for len(frontier) > 0 {
			// Pop the current item
			n := len(frontier)
			item := frontier[n-1]
			frontier = frontier[:n-1]

			switch v := item.Val.(type) {
			case *ast.ObjectType:
				items, frontier = flattenObjectType(v, item, items, frontier)
			case *ast.ListType:
				items, frontier = flattenListType(v, item, items, frontier)
			default:
				items = append(items, item)
			}
		}

		// Reverse the list since the frontier model runs things backwards
		for i := len(items)/2 - 1; i >= 0; i-- {
			opp := len(items) - 1 - i
			items[i], items[opp] = items[opp], items[i]
		}

		// Done! Set the original items
		list.Items = items
		return n, true
	})
}
|
||||||
|
|
||||||
|
// flattenListType expands a list value whose elements are all objects:
// each element is pushed onto the frontier as its own item carrying the
// parent's keys. Lists that are empty or contain any non-object element
// are kept as-is. Returns the (possibly grown) items and frontier slices.
func flattenListType(
	ot *ast.ListType,
	item *ast.ObjectItem,
	items []*ast.ObjectItem,
	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
	// If the list is empty, keep the original list
	if len(ot.List) == 0 {
		items = append(items, item)
		return items, frontier
	}

	// All the elements of this object must also be objects!
	for _, subitem := range ot.List {
		if _, ok := subitem.(*ast.ObjectType); !ok {
			items = append(items, item)
			return items, frontier
		}
	}

	// Great! We have a match go through all the items and flatten
	for _, elem := range ot.List {
		// Add it to the frontier so that we can recurse
		frontier = append(frontier, &ast.ObjectItem{
			Keys:        item.Keys,
			Assign:      item.Assign,
			Val:         elem,
			LeadComment: item.LeadComment,
			LineComment: item.LineComment,
		})
	}

	return items, frontier
}
|
||||||
|
|
||||||
|
// flattenObjectType merges an object value whose items are all objects
// into the parent: each sub-item is pushed onto the frontier with the
// parent's keys prepended to its own. Objects with a nil item list or
// any non-object value are kept as-is. Returns the updated slices.
func flattenObjectType(
	ot *ast.ObjectType,
	item *ast.ObjectItem,
	items []*ast.ObjectItem,
	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
	// If the list has no items we do not have to flatten anything
	if ot.List.Items == nil {
		items = append(items, item)
		return items, frontier
	}

	// All the elements of this object must also be objects!
	for _, subitem := range ot.List.Items {
		if _, ok := subitem.Val.(*ast.ObjectType); !ok {
			items = append(items, item)
			return items, frontier
		}
	}

	// Great! We have a match go through all the items and flatten
	for _, subitem := range ot.List.Items {
		// Copy the new key: parent keys followed by the sub-item's keys
		keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
		copy(keys, item.Keys)
		copy(keys[len(item.Keys):], subitem.Keys)

		// Add it to the frontier so that we can recurse
		frontier = append(frontier, &ast.ObjectItem{
			Keys:        keys,
			Assign:      item.Assign,
			Val:         subitem.Val,
			LeadComment: item.LeadComment,
			LineComment: item.LineComment,
		})
	}

	return items, frontier
}
|
313
vendor/github.com/hashicorp/hcl/json/parser/parser.go
generated
vendored
Normal file
313
vendor/github.com/hashicorp/hcl/json/parser/parser.go
generated
vendored
Normal file
@ -0,0 +1,313 @@
|
|||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/hcl/ast"
|
||||||
|
hcltoken "github.com/hashicorp/hcl/hcl/token"
|
||||||
|
"github.com/hashicorp/hcl/json/scanner"
|
||||||
|
"github.com/hashicorp/hcl/json/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Parser builds an HCL-compatible AST from a stream of JSON tokens
// produced by the json scanner.
type Parser struct {
	sc *scanner.Scanner // underlying token source

	// Last read token
	tok       token.Token
	commaPrev token.Token

	enableTrace bool // when true, printTrace emits parse tracing to stdout
	indent      int  // current trace indentation depth
	n           int  // buffer size (max = 1)
}
|
||||||
|
|
||||||
|
// newParser returns a Parser reading tokens from src via a fresh scanner.
func newParser(src []byte) *Parser {
	return &Parser{
		sc: scanner.New(src),
	}
}
|
||||||
|
|
||||||
|
// Parse returns the fully parsed source and returns the abstract syntax tree.
// It is a convenience wrapper around newParser + (*Parser).Parse.
func Parse(src []byte) (*ast.File, error) {
	p := newParser(src)
	return p.Parse()
}
|
||||||
|
|
||||||
|
// errEofToken is the sentinel returned when the scanner yields EOF;
// parsing loops compare against it with == to end cleanly.
var errEofToken = errors.New("EOF token found")
|
||||||
|
|
||||||
|
// Parse returns the fully parsed source and returns the abstract syntax tree.
// Scanner errors are captured via the Error callback and take precedence
// over parse errors.
func (p *Parser) Parse() (*ast.File, error) {
	f := &ast.File{}
	var err, scerr error
	p.sc.Error = func(pos token.Pos, msg string) {
		scerr = fmt.Errorf("%s: %s", pos, msg)
	}

	// The root must be an object in JSON
	object, err := p.object()
	if scerr != nil {
		return nil, scerr
	}
	if err != nil {
		return nil, err
	}

	// We make our final node an object list so it is more HCL compatible
	f.Node = object.List

	// Flatten it, which finds patterns and turns them into more HCL-like
	// AST trees.
	flattenObjects(f.Node)

	return f, nil
}
|
||||||
|
|
||||||
|
// objectList parses comma-separated object items until EOF or the first
// non-comma follower, returning them as an ObjectList. On error the
// partially built list is returned alongside the error.
func (p *Parser) objectList() (*ast.ObjectList, error) {
	defer un(trace(p, "ParseObjectList"))
	node := &ast.ObjectList{}

	for {
		n, err := p.objectItem()
		if err == errEofToken {
			break // we are finished
		}

		// we don't return a nil node, because might want to use already
		// collected items.
		if err != nil {
			return node, err
		}

		node.Add(n)

		// Check for a followup comma. If it isn't a comma, then we're done
		if tok := p.scan(); tok.Type != token.COMMA {
			break
		}
	}

	return node, nil
}
|
||||||
|
|
||||||
|
// objectItem parses a single object item: its key(s) and, when the key
// is followed by a COLON, the associated value. The JSON colon position
// is recorded as the HCL assign position.
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
	defer un(trace(p, "ParseObjectItem"))

	keys, err := p.objectKey()
	if err != nil {
		return nil, err
	}

	o := &ast.ObjectItem{
		Keys: keys,
	}

	switch p.tok.Type {
	case token.COLON:
		pos := p.tok.Pos
		// translate the JSON token position into an HCL token position
		o.Assign = hcltoken.Pos{
			Filename: pos.Filename,
			Offset:   pos.Offset,
			Line:     pos.Line,
			Column:   pos.Column,
		}

		o.Val, err = p.objectValue()
		if err != nil {
			return nil, err
		}
	}
	// NOTE(review): a non-COLON token falls through and yields an item
	// with no value — confirm callers rely on this.

	return o, nil
}
|
||||||
|
|
||||||
|
// objectKey parses an object key and returns a ObjectKey AST.
// It scans STRING tokens until a COLON terminates the key; a COLON with
// no preceding key (`{ :`) and any other token type are syntax errors.
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
	keyCount := 0
	keys := make([]*ast.ObjectKey, 0)

	for {
		tok := p.scan()
		switch tok.Type {
		case token.EOF:
			return nil, errEofToken
		case token.STRING:
			keyCount++
			keys = append(keys, &ast.ObjectKey{
				Token: p.tok.HCLToken(),
			})
		case token.COLON:
			// If we have a zero keycount it means that we never got
			// an object key, i.e. `{ :`. This is a syntax error.
			if keyCount == 0 {
				return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
			}

			// Done
			return keys, nil
		case token.ILLEGAL:
			return nil, errors.New("illegal")
		default:
			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
		}
	}
}
|
||||||
|
|
||||||
|
// objectValue parses any JSON value: a literal (number, float, bool,
// null, string), a nested object, or a list. Any other token is an error.
func (p *Parser) objectValue() (ast.Node, error) {
	defer un(trace(p, "ParseObjectValue"))
	tok := p.scan()

	switch tok.Type {
	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
		return p.literalType()
	case token.LBRACE:
		return p.objectType()
	case token.LBRACK:
		return p.listType()
	case token.EOF:
		return nil, errEofToken
	}

	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
}
|
||||||
|
|
||||||
|
// object parses the document root, which in JSON must be an object
// (i.e. the next token must be LBRACE).
func (p *Parser) object() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseType"))
	tok := p.scan()

	switch tok.Type {
	case token.LBRACE:
		return p.objectType()
	case token.EOF:
		return nil, errEofToken
	}

	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
}
|
||||||
|
|
||||||
|
// objectType parses an object type and returns a ObjectType AST.
func (p *Parser) objectType() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseObjectType"))

	// we assume that the currently scanned token is a LBRACE
	o := &ast.ObjectType{}

	l, err := p.objectList()

	// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
	// not a RBRACE, it's a syntax error and we just return it.
	if err != nil && p.tok.Type != token.RBRACE {
		return nil, err
	}

	o.List = l
	return o, nil
}
|
||||||
|
|
||||||
|
// listType parses a list type and returns a ListType AST.
// Literals and nested objects are accepted as elements; commas are
// skipped; RBRACK terminates the list. BOOL and nested-list elements
// are currently ignored (see TODOs).
func (p *Parser) listType() (*ast.ListType, error) {
	defer un(trace(p, "ParseListType"))

	// we assume that the currently scanned token is a LBRACK
	l := &ast.ListType{}

	for {
		tok := p.scan()
		switch tok.Type {
		case token.NUMBER, token.FLOAT, token.STRING:
			node, err := p.literalType()
			if err != nil {
				return nil, err
			}

			l.Add(node)
		case token.COMMA:
			continue
		case token.LBRACE:
			node, err := p.objectType()
			if err != nil {
				return nil, err
			}

			l.Add(node)
		case token.BOOL:
			// TODO(arslan) should we support? not supported by HCL yet
		case token.LBRACK:
			// TODO(arslan) should we support nested lists? Even though it's
			// written in README of HCL, it's not a part of the grammar
			// (not defined in parse.y)
		case token.RBRACK:
			// finished
			return l, nil
		default:
			return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
		}

	}
}
|
||||||
|
|
||||||
|
// literalType parses a literal type and returns a LiteralType AST,
// converting the current JSON token into its HCL equivalent.
func (p *Parser) literalType() (*ast.LiteralType, error) {
	defer un(trace(p, "ParseLiteral"))

	return &ast.LiteralType{
		Token: p.tok.HCLToken(),
	}, nil
}
|
||||||
|
|
||||||
|
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead (one-token lookahead buffer).
func (p *Parser) scan() token.Token {
	// If we have a token on the buffer, then return it.
	if p.n != 0 {
		p.n = 0
		return p.tok
	}

	p.tok = p.sc.Scan()
	return p.tok
}
|
||||||
|
|
||||||
|
// unscan pushes the previously read token back onto the buffer;
// the next call to scan will return it instead of reading ahead.
func (p *Parser) unscan() {
	p.n = 1
}
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// Parsing support
|
||||||
|
|
||||||
|
// printTrace writes a parse-trace line to stdout when tracing is enabled:
// the current token position, dot-padding proportional to the nesting
// depth, and the supplied message arguments.
func (p *Parser) printTrace(a ...interface{}) {
	if !p.enableTrace {
		return
	}

	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)

	// emit two columns of dots per indent level, wrapping the pattern
	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}
|
||||||
|
|
||||||
|
// trace logs entry into a parse production and increases the trace
// indent; it returns p so it composes with un (see un's usage pattern).
func trace(p *Parser, msg string) *Parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
|
||||||
|
|
||||||
|
// un closes a trace entry opened by trace, restoring the indent.
// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
	p.indent--
	p.printTrace(")")
}
|
451
vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
generated
vendored
Normal file
451
vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
generated
vendored
Normal file
@ -0,0 +1,451 @@
|
|||||||
|
package scanner
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/json/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// eof represents a marker rune for the end of the reader.
// rune(0) cannot appear in valid scanned input, so it is safe as a sentinel.
const eof = rune(0)
|
||||||
|
|
||||||
|
// Scanner defines a lexical scanner for the JSON frontend of HCL.
type Scanner struct {
	buf *bytes.Buffer // Source buffer for advancing and scanning
	src []byte        // Source buffer for immutable access

	// Source Position
	srcPos  token.Pos // current position
	prevPos token.Pos // previous position, used for peek() method

	lastCharLen int // length of last character in bytes
	lastLineLen int // length of last line in characters (for correct column reporting)

	tokStart int // token text start position
	tokEnd   int // token text end position

	// Error is called for each error encountered. If no Error
	// function is set, the error is reported to os.Stderr.
	Error func(pos token.Pos, msg string)

	// ErrorCount is incremented by one for each error encountered.
	ErrorCount int

	// tokPos is the start position of most recently scanned token; set by
	// Scan. The Filename field is always left untouched by the Scanner. If
	// an error is reported (via Error) and Position is invalid, the scanner is
	// not inside a token.
	tokPos token.Pos
}
|
||||||
|
|
||||||
|
// New creates and initializes a new instance of Scanner using src as
|
||||||
|
// its source content.
|
||||||
|
func New(src []byte) *Scanner {
|
||||||
|
// even though we accept a src, we read from a io.Reader compatible type
|
||||||
|
// (*bytes.Buffer). So in the future we might easily change it to streaming
|
||||||
|
// read.
|
||||||
|
b := bytes.NewBuffer(src)
|
||||||
|
s := &Scanner{
|
||||||
|
buf: b,
|
||||||
|
src: src,
|
||||||
|
}
|
||||||
|
|
||||||
|
// srcPosition always starts with 1
|
||||||
|
s.srcPos.Line = 1
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// next reads the next rune from the bufferred reader. Returns the rune(0) if
// an error occurs (or io.EOF is returned). As a side effect it advances
// srcPos/lastCharLen and, on a newline, the line accounting used for
// column reporting.
func (s *Scanner) next() rune {
	ch, size, err := s.buf.ReadRune()
	if err != nil {
		// advance for error reporting (size is 0 here, but Column still moves)
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		return eof
	}

	// A RuneError of size 1 means the input bytes were not valid UTF-8;
	// report it but still return the replacement rune so scanning continues.
	if ch == utf8.RuneError && size == 1 {
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		s.err("illegal UTF-8 encoding")
		return ch
	}

	// remember last position so unread() can restore it
	s.prevPos = s.srcPos

	s.srcPos.Column++
	s.lastCharLen = size
	s.srcPos.Offset += size

	if ch == '\n' {
		// record the finished line's length before resetting the column;
		// recentPosition relies on lastLineLen when Column == 0
		s.srcPos.Line++
		s.lastLineLen = s.srcPos.Column
		s.srcPos.Column = 0
	}

	// debug
	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
	return ch
}
|
||||||
|
|
||||||
|
// unread unreads the previous read Rune and updates the source position
|
||||||
|
func (s *Scanner) unread() {
|
||||||
|
if err := s.buf.UnreadRune(); err != nil {
|
||||||
|
panic(err) // this is user fault, we should catch it
|
||||||
|
}
|
||||||
|
s.srcPos = s.prevPos // put back last position
|
||||||
|
}
|
||||||
|
|
||||||
|
// peek returns the next rune without advancing the reader.
|
||||||
|
func (s *Scanner) peek() rune {
|
||||||
|
peek, _, err := s.buf.ReadRune()
|
||||||
|
if err != nil {
|
||||||
|
return eof
|
||||||
|
}
|
||||||
|
|
||||||
|
s.buf.UnreadRune()
|
||||||
|
return peek
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan scans the next token and returns the token. Leading whitespace is
// skipped; the returned token carries its type, its starting position and
// its literal text (sliced from the immutable src buffer).
func (s *Scanner) Scan() token.Token {
	ch := s.next()

	// skip white space
	for isWhitespace(ch) {
		ch = s.next()
	}

	var tok token.Type

	// token text markings: the token starts at the rune we just consumed
	s.tokStart = s.srcPos.Offset - s.lastCharLen

	// token position, initial next() is moving the offset by one(size of rune
	// actually), though we are interested with the starting point
	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
	if s.srcPos.Column > 0 {
		// common case: last character was not a '\n'
		s.tokPos.Line = s.srcPos.Line
		s.tokPos.Column = s.srcPos.Column
	} else {
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		s.tokPos.Line = s.srcPos.Line - 1
		s.tokPos.Column = s.lastLineLen
	}

	switch {
	case isLetter(ch):
		// JSON only allows the bare words true/false/null
		lit := s.scanIdentifier()
		if lit == "true" || lit == "false" {
			tok = token.BOOL
		} else if lit == "null" {
			tok = token.NULL
		} else {
			s.err("illegal char")
		}
	case isDecimal(ch):
		tok = s.scanNumber(ch)
	default:
		switch ch {
		case eof:
			tok = token.EOF
		case '"':
			tok = token.STRING
			s.scanString()
		case '.':
			// a leading '.' may start a float like ".5"
			tok = token.PERIOD
			ch = s.peek()
			if isDecimal(ch) {
				tok = token.FLOAT
				ch = s.scanMantissa(ch)
				ch = s.scanExponent(ch)
			}
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case '{':
			tok = token.LBRACE
		case '}':
			tok = token.RBRACE
		case ',':
			tok = token.COMMA
		case ':':
			tok = token.COLON
		case '-':
			// unary minus is only legal when a digit follows
			if isDecimal(s.peek()) {
				ch := s.next()
				tok = s.scanNumber(ch)
			} else {
				s.err("illegal char")
			}
		default:
			s.err("illegal char: " + string(ch))
		}
	}

	// finish token ending
	s.tokEnd = s.srcPos.Offset

	// create token literal
	var tokenText string
	if s.tokStart >= 0 {
		tokenText = string(s.src[s.tokStart:s.tokEnd])
	}
	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call

	return token.Token{
		Type: tok,
		Pos:  s.tokPos,
		Text: tokenText,
	}
}
|
||||||
|
|
||||||
|
// scanNumber scans a HCL number definition starting with the given rune.
// It returns token.FLOAT when an exponent or fraction part is found,
// otherwise token.NUMBER. Leading zeros (e.g. "007") are reported as errors.
func (s *Scanner) scanNumber(ch rune) token.Type {
	zero := ch == '0'
	pos := s.srcPos // remember where the number started for the leading-zero check

	s.scanMantissa(ch)
	ch = s.next() // seek forward
	if ch == 'e' || ch == 'E' {
		ch = s.scanExponent(ch)
		return token.FLOAT
	}

	if ch == '.' {
		ch = s.scanFraction(ch)
		if ch == 'e' || ch == 'E' {
			ch = s.next()
			ch = s.scanExponent(ch)
		}
		return token.FLOAT
	}

	if ch != eof {
		s.unread()
	}

	// If we have a larger number and this is zero, error
	if zero && pos != s.srcPos {
		s.err("numbers cannot start with 0")
	}

	return token.NUMBER
}
|
||||||
|
|
||||||
|
// scanMantissa scans the mantissa beginning from the rune. It returns the next
|
||||||
|
// non decimal rune. It's used to determine wheter it's a fraction or exponent.
|
||||||
|
func (s *Scanner) scanMantissa(ch rune) rune {
|
||||||
|
scanned := false
|
||||||
|
for isDecimal(ch) {
|
||||||
|
ch = s.next()
|
||||||
|
scanned = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if scanned && ch != eof {
|
||||||
|
s.unread()
|
||||||
|
}
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanFraction scans the fraction after the '.' rune
|
||||||
|
func (s *Scanner) scanFraction(ch rune) rune {
|
||||||
|
if ch == '.' {
|
||||||
|
ch = s.peek() // we peek just to see if we can move forward
|
||||||
|
ch = s.scanMantissa(ch)
|
||||||
|
}
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
|
||||||
|
// rune.
|
||||||
|
func (s *Scanner) scanExponent(ch rune) rune {
|
||||||
|
if ch == 'e' || ch == 'E' {
|
||||||
|
ch = s.next()
|
||||||
|
if ch == '-' || ch == '+' {
|
||||||
|
ch = s.next()
|
||||||
|
}
|
||||||
|
ch = s.scanMantissa(ch)
|
||||||
|
}
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanString scans a quoted string
|
||||||
|
func (s *Scanner) scanString() {
|
||||||
|
braces := 0
|
||||||
|
for {
|
||||||
|
// '"' opening already consumed
|
||||||
|
// read character after quote
|
||||||
|
ch := s.next()
|
||||||
|
|
||||||
|
if ch == '\n' || ch < 0 || ch == eof {
|
||||||
|
s.err("literal not terminated")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ch == '"' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're going into a ${} then we can ignore quotes for awhile
|
||||||
|
if braces == 0 && ch == '$' && s.peek() == '{' {
|
||||||
|
braces++
|
||||||
|
s.next()
|
||||||
|
} else if braces > 0 && ch == '{' {
|
||||||
|
braces++
|
||||||
|
}
|
||||||
|
if braces > 0 && ch == '}' {
|
||||||
|
braces--
|
||||||
|
}
|
||||||
|
|
||||||
|
if ch == '\\' {
|
||||||
|
s.scanEscape()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanEscape scans an escape sequence; the leading backslash has already
// been consumed. Returns the last rune of the sequence.
func (s *Scanner) scanEscape() rune {
	// http://en.cppreference.com/w/cpp/language/escape
	ch := s.next() // read character after '\'
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
		// nothing to do
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// octal notation
		ch = s.scanDigits(ch, 8, 3)
	case 'x':
		// hexadecimal notation
		ch = s.scanDigits(s.next(), 16, 2)
	case 'u':
		// universal character name (4 hex digits)
		ch = s.scanDigits(s.next(), 16, 4)
	case 'U':
		// universal character name (8 hex digits)
		ch = s.scanDigits(s.next(), 16, 8)
	default:
		s.err("illegal char escape")
	}
	return ch
}
|
||||||
|
|
||||||
|
// scanDigits scans a rune with the given base for n times. For example an
|
||||||
|
// octal notation \184 would yield in scanDigits(ch, 8, 3)
|
||||||
|
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
|
||||||
|
for n > 0 && digitVal(ch) < base {
|
||||||
|
ch = s.next()
|
||||||
|
n--
|
||||||
|
}
|
||||||
|
if n > 0 {
|
||||||
|
s.err("illegal char escape")
|
||||||
|
}
|
||||||
|
|
||||||
|
// we scanned all digits, put the last non digit char back
|
||||||
|
s.unread()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanIdentifier scans an identifier and returns the literal string
|
||||||
|
func (s *Scanner) scanIdentifier() string {
|
||||||
|
offs := s.srcPos.Offset - s.lastCharLen
|
||||||
|
ch := s.next()
|
||||||
|
for isLetter(ch) || isDigit(ch) || ch == '-' {
|
||||||
|
ch = s.next()
|
||||||
|
}
|
||||||
|
|
||||||
|
if ch != eof {
|
||||||
|
s.unread() // we got identifier, put back latest char
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(s.src[offs:s.srcPos.Offset])
|
||||||
|
}
|
||||||
|
|
||||||
|
// recentPosition returns the position of the character immediately after the
|
||||||
|
// character or token returned by the last call to Scan.
|
||||||
|
func (s *Scanner) recentPosition() (pos token.Pos) {
|
||||||
|
pos.Offset = s.srcPos.Offset - s.lastCharLen
|
||||||
|
switch {
|
||||||
|
case s.srcPos.Column > 0:
|
||||||
|
// common case: last character was not a '\n'
|
||||||
|
pos.Line = s.srcPos.Line
|
||||||
|
pos.Column = s.srcPos.Column
|
||||||
|
case s.lastLineLen > 0:
|
||||||
|
// last character was a '\n'
|
||||||
|
// (we cannot be at the beginning of the source
|
||||||
|
// since we have called next() at least once)
|
||||||
|
pos.Line = s.srcPos.Line - 1
|
||||||
|
pos.Column = s.lastLineLen
|
||||||
|
default:
|
||||||
|
// at the beginning of the source
|
||||||
|
pos.Line = 1
|
||||||
|
pos.Column = 1
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// err prints the error of any scanning to s.Error function. If the function is
|
||||||
|
// not defined, by default it prints them to os.Stderr
|
||||||
|
func (s *Scanner) err(msg string) {
|
||||||
|
s.ErrorCount++
|
||||||
|
pos := s.recentPosition()
|
||||||
|
|
||||||
|
if s.Error != nil {
|
||||||
|
s.Error(pos, msg)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isLetter returns true if the given rune is an ASCII letter, an
// underscore, or a non-ASCII Unicode letter.
// (The original comment wrongly said "isHexadecimal" — copy-paste error.)
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
|
||||||
|
|
||||||
|
// isDigit returns true if the given rune is an ASCII decimal digit or a
// non-ASCII Unicode digit.
// (The original comment wrongly said "isHexadecimal" — copy-paste error.)
func isDigit(ch rune) bool {
	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
|
||||||
|
|
||||||
|
// isDecimal returns true if the given rune is an ASCII decimal digit
// ('0' through '9').
// (The original comment wrongly said "isHexadecimal" — copy-paste error.)
func isDecimal(ch rune) bool {
	return '0' <= ch && ch <= '9'
}
|
||||||
|
|
||||||
|
// isHexadecimal returns true if the given rune is a hexadecimal digit
// (0-9, a-f or A-F).
func isHexadecimal(ch rune) bool {
	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
}
|
||||||
|
|
||||||
|
// isWhitespace returns true if the rune is a space, tab, newline or
// carriage return.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
|
||||||
|
|
||||||
|
// digitVal returns the integer value of a given octal, decimal or
// hexadecimal digit rune. For any other rune it returns 16, which is
// larger than every legal digit value and lets callers test validity
// with digitVal(ch) < base.
func digitVal(ch rune) int {
	if '0' <= ch && ch <= '9' {
		return int(ch - '0')
	}
	if 'a' <= ch && ch <= 'f' {
		return int(ch-'a') + 10
	}
	if 'A' <= ch && ch <= 'F' {
		return int(ch-'A') + 10
	}
	return 16 // larger than any legal digit val
}
|
46
vendor/github.com/hashicorp/hcl/json/token/position.go
generated
vendored
Normal file
46
vendor/github.com/hashicorp/hcl/json/token/position.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
package token
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Pos is valid if the line number is > 0.
type Pos struct {
	Filename string // filename, if any
	Offset   int    // byte offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count)
}
|
||||||
|
|
||||||
|
// IsValid returns true if the position is valid.
|
||||||
|
func (p *Pos) IsValid() bool { return p.Line > 0 }
|
||||||
|
|
||||||
|
// String returns a string in one of several forms:
|
||||||
|
//
|
||||||
|
// file:line:column valid position with file name
|
||||||
|
// line:column valid position without file name
|
||||||
|
// file invalid position with file name
|
||||||
|
// - invalid position without file name
|
||||||
|
func (p Pos) String() string {
|
||||||
|
s := p.Filename
|
||||||
|
if p.IsValid() {
|
||||||
|
if s != "" {
|
||||||
|
s += ":"
|
||||||
|
}
|
||||||
|
s += fmt.Sprintf("%d:%d", p.Line, p.Column)
|
||||||
|
}
|
||||||
|
if s == "" {
|
||||||
|
s = "-"
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Before reports whether the position p is before u.
|
||||||
|
func (p Pos) Before(u Pos) bool {
|
||||||
|
return u.Offset > p.Offset || u.Line > p.Line
|
||||||
|
}
|
||||||
|
|
||||||
|
// After reports whether the position p is after u.
|
||||||
|
func (p Pos) After(u Pos) bool {
|
||||||
|
return u.Offset < p.Offset || u.Line < p.Line
|
||||||
|
}
|
118
vendor/github.com/hashicorp/hcl/json/token/token.go
generated
vendored
Normal file
118
vendor/github.com/hashicorp/hcl/json/token/token.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
|
|||||||
|
package token
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
hcltoken "github.com/hashicorp/hcl/hcl/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Token defines a single HCL token which can be obtained via the Scanner.
type Token struct {
	Type Type   // lexical kind of the token
	Pos  Pos    // starting position of the token in the source
	Text string // literal text as it appeared in the source
}
|
||||||
|
|
||||||
|
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration
// Language) JSON dialect.
type Type int
|
||||||
|
|
||||||
|
const (
	// Special tokens
	ILLEGAL Type = iota
	EOF

	// unexported *_beg/*_end markers delimit ranges tested by the
	// IsIdentifier / IsLiteral / IsOperator predicates
	identifier_beg
	literal_beg
	NUMBER // 12345
	FLOAT  // 123.45
	BOOL   // true,false
	STRING // "abc"
	NULL   // null
	literal_end
	identifier_end

	operator_beg
	LBRACK // [
	LBRACE // {
	COMMA  // ,
	PERIOD // .
	COLON  // :

	RBRACK // ]
	RBRACE // }

	operator_end
)
|
||||||
|
|
||||||
|
// tokens maps each Type to its display name; used by Type.String.
// Unset entries (the unexported markers) are the empty string.
var tokens = [...]string{
	ILLEGAL: "ILLEGAL",

	EOF: "EOF",

	NUMBER: "NUMBER",
	FLOAT:  "FLOAT",
	BOOL:   "BOOL",
	STRING: "STRING",
	NULL:   "NULL",

	LBRACK: "LBRACK",
	LBRACE: "LBRACE",
	COMMA:  "COMMA",
	PERIOD: "PERIOD",
	COLON:  "COLON",

	RBRACK: "RBRACK",
	RBRACE: "RBRACE",
}
|
||||||
|
|
||||||
|
// String returns the string corresponding to the token tok.
|
||||||
|
func (t Type) String() string {
|
||||||
|
s := ""
|
||||||
|
if 0 <= t && t < Type(len(tokens)) {
|
||||||
|
s = tokens[t]
|
||||||
|
}
|
||||||
|
if s == "" {
|
||||||
|
s = "token(" + strconv.Itoa(int(t)) + ")"
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsIdentifier returns true for tokens corresponding to identifiers and basic
|
||||||
|
// type literals; it returns false otherwise.
|
||||||
|
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
|
||||||
|
|
||||||
|
// IsLiteral returns true for tokens corresponding to basic type literals; it
|
||||||
|
// returns false otherwise.
|
||||||
|
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
|
||||||
|
|
||||||
|
// IsOperator returns true for tokens corresponding to operators and
|
||||||
|
// delimiters; it returns false otherwise.
|
||||||
|
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
|
||||||
|
|
||||||
|
// String returns the token's literal text. Note that this is only
|
||||||
|
// applicable for certain token types, such as token.IDENT,
|
||||||
|
// token.STRING, etc..
|
||||||
|
func (t Token) String() string {
|
||||||
|
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HCLToken converts this token to an HCL token.
//
// The token type must be a literal type or this will panic.
func (t Token) HCLToken() hcltoken.Token {
	switch t.Type {
	case BOOL:
		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
	case FLOAT:
		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
	case NULL:
		// JSON null becomes an empty HCL string
		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
	case NUMBER:
		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
	case STRING:
		// JSON flag marks the text as JSON-escaped for later unquoting
		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
	default:
		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
	}
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user