Update dependencies and remove old matterclient lib (#2067)

Author: Wim
Date: 2023-08-05 20:43:19 +02:00
Committed by: GitHub
Commit: 56e7bd01ca (parent 9459495484)
772 changed files with 139315 additions and 121315 deletions

vendor/golang.org/x/net/html/doc.go (generated, vendored): 20 changed lines

@@ -99,14 +99,20 @@ Care should be taken when parsing and interpreting HTML, whether full documents
or fragments, within the framework of the HTML specification, especially with
regard to untrusted inputs.
This package provides both a tokenizer and a parser. Only the parser constructs
a DOM according to the HTML specification, resolving malformed and misplaced
tags where appropriate. The tokenizer simply tokenizes the HTML presented to it,
and as such does not resolve issues that may exist in the processed HTML,
producing a literal interpretation of the input.
This package provides both a tokenizer and a parser, which implement the
tokenization, and tokenization and tree construction stages of the WHATWG HTML
parsing specification respectively. While the tokenizer parses and normalizes
individual HTML tokens, only the parser constructs the DOM tree from the
tokenized HTML, as described in the tree construction stage of the
specification, dynamically modifying or extending the document's DOM tree.
If your use case requires semantically well-formed HTML, as defined by the
WHATWG specifiction, the parser should be used rather than the tokenizer.
If your use case requires semantically well-formed HTML documents, as defined by
the WHATWG specification, the parser should be used rather than the tokenizer.
In security contexts, if trust decisions are being made using the tokenized or
parsed content, the input must be re-serialized (for instance by using Render or
Token.String) in order for those trust decisions to hold, as the process of
tokenization or parsing may alter the content.
*/
package html // import "golang.org/x/net/html"
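
As context for the reworded doc comment, here is a minimal sketch (not part of this diff; the input string is made up) of the parse-then-re-serialize pattern it recommends when trust decisions depend on the content:

package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// Malformed input: the parser resolves the unclosed <p> and <b> tags
	// while building the DOM, as the tree construction stage requires.
	doc, err := html.Parse(strings.NewReader("<p>hello <b>world"))
	if err != nil {
		panic(err)
	}
	// Re-serialize before making any trust decision, since tokenizing or
	// parsing may have altered the content.
	if err := html.Render(os.Stdout, doc); err != nil {
		panic(err)
	}
}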


@@ -194,9 +194,8 @@ func render1(w writer, n *Node) error {
}
}
// Render any child nodes.
switch n.Data {
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
// Render any child nodes
if childTextNodesAreLiteral(n) {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if c.Type == TextNode {
if _, err := w.WriteString(c.Data); err != nil {
@@ -213,7 +212,7 @@ func render1(w writer, n *Node) error {
// last element in the file, with no closing tag.
return plaintextAbort
}
default:
} else {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if err := render1(w, c); err != nil {
return err
@@ -231,6 +230,27 @@ func render1(w writer, n *Node) error {
return w.WriteByte('>')
}
func childTextNodesAreLiteral(n *Node) bool {
// Per WHATWG HTML 13.3, if the parent of the current node is a style,
// script, xmp, iframe, noembed, noframes, or plaintext element, and the
// current node is a text node, append the value of the node's data
// literally. The specification is not explicit about it, but we only
// enforce this if we are in the HTML namespace (i.e. when the namespace is
// "").
// NOTE: we also always include noscript elements, although the
// specification states that they should only be rendered as such if
// scripting is enabled for the node (which is not something we track).
if n.Namespace != "" {
return false
}
switch n.Data {
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
return true
default:
return false
}
}
// writeQuoted writes s to w surrounded by quotes. Normally it will use double
// quotes, but if s contains a double quote, it will use single quotes.
// It is used for writing the identifiers in a doctype declaration.
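
To illustrate what childTextNodesAreLiteral gates, a rough sketch (example input chosen for illustration, not taken from this commit) showing that text under a script element in the HTML namespace is rendered verbatim rather than escaped:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/html"
	"golang.org/x/net/html/atom"
)

func main() {
	// A script element in the HTML namespace (Namespace == ""), so Render
	// writes its child text node literally.
	script := &html.Node{Type: html.ElementNode, DataAtom: atom.Script, Data: "script"}
	script.AppendChild(&html.Node{Type: html.TextNode, Data: "if (a < b) { run(); }"})

	var buf bytes.Buffer
	if err := html.Render(&buf, script); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.String()) // <script>if (a < b) { run(); }</script>
}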


@@ -913,7 +913,14 @@ func (z *Tokenizer) readTagAttrKey() {
case ' ', '\n', '\r', '\t', '\f', '/':
z.pendingAttr[0].end = z.raw.end - 1
return
case '=', '>':
case '=':
if z.pendingAttr[0].start+1 == z.raw.end {
// WHATWG 13.2.5.32, if we see an equals sign before the attribute name
// begins, we treat it as a character in the attribute name and continue.
continue
}
fallthrough
case '>':
z.raw.end--
z.pendingAttr[0].end = z.raw.end
return
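
A small sketch (hypothetical input) of the WHATWG 13.2.5.32 behavior this hunk implements: an equals sign seen before the attribute name has started becomes part of the name instead of terminating it.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<p =foo="bar">`))
	z.Next()
	// With this change the leading '=' is kept in the attribute key,
	// so the attribute comes out as {Key: "=foo", Val: "bar"}.
	fmt.Printf("%+v\n", z.Token().Attr)
}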


@@ -44,7 +44,7 @@ func init() {
// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to
// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to
// h2c. When either of those situations occur we hijack the HTTP/1 connection,
// convert it to a HTTP/2 connection and pass the net.Conn to http2.ServeConn.
// convert it to an HTTP/2 connection and pass the net.Conn to http2.ServeConn.
type h2cHandler struct {
Handler http.Handler
s *http2.Server
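
For reference, a minimal h2c server using this handler; the address and mux are placeholders.

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "proto: %s\n", r.Proto) // "HTTP/2.0" over h2c
	})
	// h2cHandler hijacks prior-knowledge and Upgrade requests, converts the
	// connection to HTTP/2, and hands the net.Conn to http2.ServeConn.
	log.Fatal(http.ListenAndServe(":8080", h2c.NewHandler(mux, &http2.Server{})))
}

A client can exercise the prior-knowledge path with, for example, curl --http2-prior-knowledge http://localhost:8080/, and the other path with a plain HTTP/1.1 Upgrade request.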


@@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
p.c.L = &p.mu
}
defer p.c.Signal()
if p.err != nil {
if p.err != nil || p.breakErr != nil {
return 0, errClosedPipeWrite
}
if p.breakErr != nil {
p.unread += len(d)
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
}
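
The behavioral change here: writes arriving after the pipe has been broken are no longer silently discarded but fail with errClosedPipeWrite. A stripped-down sketch of the same guarded-write pattern (illustrative names, not the package's internal type, which also has a reader side and a done channel):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"sync"
)

var errClosedPipeWrite = errors.New("write on closed buffer")

type bufPipe struct {
	mu       sync.Mutex
	buf      bytes.Buffer
	err      error // set when the pipe is closed normally
	breakErr error // set when the reader breaks the pipe
}

func (p *bufPipe) Write(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	// After this change a broken pipe rejects writes instead of pretending
	// the data was accepted and silently dropping it.
	if p.err != nil || p.breakErr != nil {
		return 0, errClosedPipeWrite
	}
	return p.buf.Write(d)
}

func main() {
	p := &bufPipe{}
	p.breakErr = errors.New("stream reset")
	_, err := p.Write([]byte("late data"))
	fmt.Println(err) // write on closed buffer
}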


@@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
if s.NewWriteScheduler != nil {
sc.writeSched = s.NewWriteScheduler()
} else {
sc.writeSched = NewPriorityWriteScheduler(nil)
sc.writeSched = newRoundRobinWriteScheduler()
}
// These start at the RFC-specified defaults. If there is a higher
@@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error {
}
if len(data) > 0 {
st.bodyBytes += int64(len(data))
wrote, err := st.body.Write(data)
if err != nil {
// The handler has closed the request body.
// Return the connection-level flow control for the discarded data,
// but not the stream-level flow control.
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
return nil
}
if wrote != len(data) {
panic("internal error: bad Writer")
}
st.bodyBytes += int64(len(data))
}
// Return any padded flow control now, since we won't
@@ -2426,7 +2429,7 @@ type requestBody struct {
conn *serverConn
closeOnce sync.Once // for use by Close only
sawEOF bool // for use by Read only
pipe *pipe // non-nil if we have a HTTP entity message body
pipe *pipe // non-nil if we have an HTTP entity message body
needsContinue bool // need to send a 100-continue
}
@@ -2566,7 +2569,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
clen = ""
}
}
if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
_, hasContentLength := rws.snapHeader["Content-Length"]
if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
clen = strconv.Itoa(len(p))
}
_, hasContentType := rws.snapHeader["Content-Type"]
@@ -2771,7 +2775,7 @@ func (w *responseWriter) FlushError() error {
err = rws.bw.Flush()
} else {
// The bufio.Writer won't call chunkWriter.Write
// (writeChunk with zero bytes, so we have to do it
// (writeChunk with zero bytes), so we have to do it
// ourselves to force the HTTP response header and/or
// final DATA frame (with END_STREAM) to be sent.
_, err = chunkWriter{rws}.Write(nil)
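
One caller-visible change in this file is the default write scheduler switching from the RFC 7540 priority tree to round-robin. A hedged sketch of opting back into the priority scheduler; the address and certificate paths are placeholders:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.NewServeMux()}
	if err := http2.ConfigureServer(srv, &http2.Server{
		// The library now defaults to the round-robin scheduler; deployments
		// that depend on RFC 7540 priorities can restore the old behavior.
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil)
		},
	}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}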


@@ -19,6 +19,7 @@ import (
"io/fs"
"log"
"math"
"math/bits"
mathrand "math/rand"
"net"
"net/http"
@@ -518,11 +519,14 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
func authorityAddr(scheme string, authority string) (addr string) {
host, port, err := net.SplitHostPort(authority)
if err != nil { // authority didn't have a port
host = authority
port = ""
}
if port == "" { // authority's port was empty
port = "443"
if scheme == "http" {
port = "80"
}
host = authority
}
if a, err := idna.ToASCII(host); err == nil {
host = a
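
The effect of this hunk is that an authority with an empty port (e.g. "example.com:") now also gets the scheme's default port. A rough stand-in mirroring the new logic; authorityAddr itself is unexported, so defaultAuthorityAddr below is purely illustrative:

package main

import (
	"fmt"
	"net"
)

func defaultAuthorityAddr(scheme, authority string) string {
	host, port, err := net.SplitHostPort(authority)
	if err != nil { // authority didn't have a port at all
		host, port = authority, ""
	}
	if port == "" { // covers both the missing-port and empty-port ("host:") cases
		port = "443"
		if scheme == "http" {
			port = "80"
		}
	}
	return net.JoinHostPort(host, port)
}

func main() {
	fmt.Println(defaultAuthorityAddr("https", "example.com"))  // example.com:443
	fmt.Println(defaultAuthorityAddr("https", "example.com:")) // example.com:443
	fmt.Println(defaultAuthorityAddr("http", "example.com"))   // example.com:80
}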
@@ -560,10 +564,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
traceGotConn(req, cc, reused)
res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
roundTripErr := err
if req, err = shouldRetryRequest(req, err); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
}
backoff := float64(uint(1) << (uint(retry) - 1))
@@ -572,7 +577,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
timer := backoffNewTimer(d)
select {
case <-timer.C:
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
timer.Stop()
@@ -1265,6 +1270,29 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return res, nil
}
cancelRequest := func(cs *clientStream, err error) error {
cs.cc.mu.Lock()
bodyClosed := cs.reqBodyClosed
cs.cc.mu.Unlock()
// Wait for the request body to be closed.
//
// If nothing closed the body before now, abortStreamLocked
// will have started a goroutine to close it.
//
// Closing the body before returning avoids a race condition
// with net/http checking its readTrackingBody to see if the
// body was read from or closed. See golang/go#60041.
//
// The body is closed in a separate goroutine without the
// connection mutex held, but dropping the mutex before waiting
// will keep us from holding it indefinitely if the body
// close is slow for some reason.
if bodyClosed != nil {
<-bodyClosed
}
return err
}
for {
select {
case <-cs.respHeaderRecv:
@@ -1284,10 +1312,10 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
case <-ctx.Done():
err := ctx.Err()
cs.abortStream(err)
return nil, err
return nil, cancelRequest(cs, err)
case <-cs.reqCancel:
cs.abortStream(errRequestCanceled)
return nil, errRequestCanceled
return nil, cancelRequest(cs, errRequestCanceled)
}
}
}
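
From the caller's side this means a cancelled HTTP/2 request no longer returns before its body has been closed, which is what golang/go#60041 was about. A generic client-side sketch (URL and payload are made up); no special handling is required, the guarantee is simply stronger now:

package main

import (
	"context"
	"log"
	"net/http"
	"strings"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
		"https://example.com/api", strings.NewReader(`{"q":"ping"}`))
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		// On cancellation, RoundTrip now waits for the request body to be
		// closed before returning, so net/http's readTrackingBody check does
		// not race with a late close.
		log.Printf("request failed: %v", err)
		return
	}
	defer resp.Body.Close()
}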
@@ -1653,7 +1681,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
return int(n) // doesn't truncate; max is 512K
}
var bufPool sync.Pool // of *[]byte
// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
// streaming requests using small frame sizes occupy large buffers initially allocated for prior
// requests needing big buffers. The size ranges are as follows:
// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB],
// {256 KB, 512 KB], {512 KB, infinity}
// In practice, the maximum scratch buffer size should not exceed 512 KB due to
// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
// It exists mainly as a safety measure, for potential future increases in max buffer size.
var bufPools [7]sync.Pool // of *[]byte
func bufPoolIndex(size int) int {
if size <= 16384 {
return 0
}
size -= 1
bits := bits.Len(uint(size))
index := bits - 14
if index >= len(bufPools) {
return len(bufPools) - 1
}
return index
}
func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
cc := cs.cc
@@ -1671,12 +1719,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
// Scratch buffer for reading into & writing from.
scratchLen := cs.frameScratchBufferLen(maxFrameSize)
var buf []byte
if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
defer bufPool.Put(bp)
index := bufPoolIndex(scratchLen)
if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
defer bufPools[index].Put(bp)
buf = *bp
} else {
buf = make([]byte, scratchLen)
defer bufPool.Put(&buf)
defer bufPools[index].Put(&buf)
}
var sawEOF bool
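
The bucket arithmetic in bufPoolIndex, worked through on a few sizes (standalone sketch; the sizes are chosen only for illustration):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Same computation as bufPoolIndex: sizes up to 16 KB use pool 0, each
	// doubling moves one pool up, and anything past 512 KB lands in the last
	// ("infinity") pool. bits.Len(uint(size-1)) is 15 for 16385..32768, 16 for
	// 32769..65536, and so on.
	for _, size := range []int{16384, 16385, 32768, 65536, 524288, 1 << 20} {
		idx := 0
		if size > 16384 {
			idx = bits.Len(uint(size-1)) - 14
			if idx >= 7 {
				idx = 6
			}
		}
		fmt.Printf("scratch %7d bytes -> bufPools[%d]\n", size, idx)
	}
}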
@@ -1844,6 +1893,9 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if err != nil {
return nil, err
}
if !httpguts.ValidHostHeader(host) {
return nil, errors.New("http2: invalid Host header")
}
var path string
if req.Method != "CONNECT" {
@@ -1880,7 +1932,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production (see Sections 3.3 and 3.4 of
// followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
@@ -2555,6 +2607,9 @@ func (b transportResponseBody) Close() error {
cs := b.cs
cc := cs.cc
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
unread := cs.bufPipe.Len()
if unread > 0 {
cc.mu.Lock()
@@ -2573,9 +2628,6 @@ func (b transportResponseBody) Close() error {
cc.wmu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
select {
case <-cs.donec:
case <-cs.ctx.Done():


@@ -184,7 +184,8 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
// writeQueue is used by implementations of WriteScheduler.
type writeQueue struct {
s []FrameWriteRequest
s []FrameWriteRequest
prev, next *writeQueue
}
func (q *writeQueue) empty() bool { return len(q.s) == 0 }

vendor/golang.org/x/net/http2/writesched_roundrobin.go (generated, vendored, new file): 119 lines

@@ -0,0 +1,119 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"fmt"
"math"
)
type roundRobinWriteScheduler struct {
// control contains control frames (SETTINGS, PING, etc.).
control writeQueue
// streams maps stream ID to a queue.
streams map[uint32]*writeQueue
// stream queues are stored in a circular linked list.
// head is the next stream to write, or nil if there are no streams open.
head *writeQueue
// pool of empty queues for reuse.
queuePool writeQueuePool
}
// newRoundRobinWriteScheduler constructs a new write scheduler.
// The round robin scheduler prioritizes control frames
// like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin
// selection from the ready streams.
func newRoundRobinWriteScheduler() WriteScheduler {
ws := &roundRobinWriteScheduler{
streams: make(map[uint32]*writeQueue),
}
return ws
}
func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
if ws.streams[streamID] != nil {
panic(fmt.Errorf("stream %d already opened", streamID))
}
q := ws.queuePool.get()
ws.streams[streamID] = q
if ws.head == nil {
ws.head = q
q.next = q
q.prev = q
} else {
// Queues are stored in a ring.
// Insert the new stream before ws.head, putting it at the end of the list.
q.prev = ws.head.prev
q.next = ws.head
q.prev.next = q
q.next.prev = q
}
}
func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) {
q := ws.streams[streamID]
if q == nil {
return
}
if q.next == q {
// This was the only open stream.
ws.head = nil
} else {
q.prev.next = q.next
q.next.prev = q.prev
if ws.head == q {
ws.head = q.next
}
}
delete(ws.streams, streamID)
ws.queuePool.put(q)
}
func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {}
func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) {
if wr.isControl() {
ws.control.push(wr)
return
}
q := ws.streams[wr.StreamID()]
if q == nil {
// This is a closed stream.
// wr should not be a HEADERS or DATA frame.
// We push the request onto the control queue.
if wr.DataSize() > 0 {
panic("add DATA on non-open stream")
}
ws.control.push(wr)
return
}
q.push(wr)
}
func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) {
// Control and RST_STREAM frames first.
if !ws.control.empty() {
return ws.control.shift(), true
}
if ws.head == nil {
return FrameWriteRequest{}, false
}
q := ws.head
for {
if wr, ok := q.consume(math.MaxInt32); ok {
ws.head = q.next
return wr, true
}
q = q.next
if q == ws.head {
break
}
}
return FrameWriteRequest{}, false
}
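
The scheduler keeps per-stream queues in a circular doubly linked list, which is why prev and next links were added to writeQueue earlier. A generic sketch of that ring bookkeeping, using illustrative names rather than the vendored types:

package ringsketch

// ring is an illustrative stand-in for the prev/next links on writeQueue.
type ring[T any] struct {
	val        T
	prev, next *ring[T]
}

// insertBefore puts n just before head (i.e. at the tail of the ring) and
// returns the head, which is unchanged unless the ring was empty.
func insertBefore[T any](head, n *ring[T]) *ring[T] {
	if head == nil {
		n.prev, n.next = n, n
		return n
	}
	n.prev, n.next = head.prev, head
	n.prev.next, n.next.prev = n, n
	return head
}

// remove splices n out of the ring and returns the new head.
func remove[T any](head, n *ring[T]) *ring[T] {
	if n.next == n { // n was the only member
		return nil
	}
	n.prev.next, n.next.prev = n.next, n.prev
	if head == n {
		head = n.next
	}
	return head
}

After serving a queue, Pop advances ws.head to the queue that follows it, which is what makes the per-stream scheduling round-robin rather than priority-ordered.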


@@ -121,7 +121,7 @@ func CheckJoiners(enable bool) Option {
}
}
// StrictDomainName limits the set of permissable ASCII characters to those
// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
// but is only useful if ValidateLabels is set.
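
A usage sketch for these options (the input domain is arbitrary); per the comment above, StrictDomainName is already enabled in the MapForLookup profile and only takes effect when ValidateLabels is set:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/idna"
)

func main() {
	p := idna.New(
		idna.MapForLookup(),
		idna.ValidateLabels(true),
		idna.StrictDomainName(true),
	)
	ascii, err := p.ToASCII("bücher.example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ascii) // xn--bcher-kva.example
}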

File diff suppressed because it is too large.

vendor/golang.org/x/net/idna/tables15.0.0.go (generated, vendored, new file): 5145 lines

File diff suppressed because it is too large.

vendor/golang.org/x/net/idna/trie.go (generated, vendored): 21 changed lines

@@ -6,27 +6,6 @@
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
s := mappings[index:]
return append(b, s[1:s[0]+1]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}
// Sparse block handling code.
type valueRange struct {

vendor/golang.org/x/net/idna/trie12.0.0.go (generated, vendored, new file): 31 lines

@@ -0,0 +1,31 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.16
// +build !go1.16
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
s := mappings[index:]
return append(b, s[1:s[0]+1]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}

vendor/golang.org/x/net/idna/trie13.0.0.go (generated, vendored, new file): 31 lines

@@ -0,0 +1,31 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.16
// +build go1.16
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
p := index
return append(b, mappings[mappingIndex[p]:mappingIndex[p+1]]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}
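
The XOR branch above encodes a mapping as a per-byte delta against the source rune rather than as a separate replacement string. A toy illustration of that idea on hand-made data (the real tables pack the delta length and bytes into xorData; nothing below comes from them):

package main

import "fmt"

// applyXORDelta appends src to b and then XORs its trailing bytes with delta,
// mimicking the shape of the xorData branch on made-up data.
func applyXORDelta(b []byte, src string, delta []byte) []byte {
	b = append(b, src...)
	for i, p := 0, len(b)-len(delta); p < len(b); i, p = i+1, p+1 {
		b[p] ^= delta[i]
	}
	return b
}

func main() {
	// Map "K" (U+004B) to its lowercase form: only the last byte changes,
	// and 0x4B ^ 0x20 = 0x6B = 'k'.
	fmt.Printf("%q\n", applyXORDelta(nil, "K", []byte{0x20})) // "k"
}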