Update dependencies and build to go1.22 (#2113)

* Update dependencies and build to go1.22
* Fix API changes with respect to the updated dependencies
* Update golangci config
vendor/golang.org/x/net/http2/Dockerfile | 51 (generated, vendored)
@@ -1,51 +0,0 @@
-#
-# This Dockerfile builds a recent curl with HTTP/2 client support, using
-# a recent nghttp2 build.
-#
-# See the Makefile for how to tag it. If Docker and that image is found, the
-# Go tests use this curl binary for integration tests.
-#
-
-FROM ubuntu:trusty
-
-RUN apt-get update && \
-    apt-get upgrade -y && \
-    apt-get install -y git-core build-essential wget
-
-RUN apt-get install -y --no-install-recommends \
-    autotools-dev libtool pkg-config zlib1g-dev \
-    libcunit1-dev libssl-dev libxml2-dev libevent-dev \
-    automake autoconf
-
-# The list of packages nghttp2 recommends for h2load:
-RUN apt-get install -y --no-install-recommends make binutils \
-    autoconf automake autotools-dev \
-    libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
-    libev-dev libevent-dev libjansson-dev libjemalloc-dev \
-    cython python3.4-dev python-setuptools
-
-# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
-ENV NGHTTP2_VER 895da9a
-RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
-
-WORKDIR /root/nghttp2
-RUN git reset --hard $NGHTTP2_VER
-RUN autoreconf -i
-RUN automake
-RUN autoconf
-RUN ./configure
-RUN make
-RUN make install
-
-WORKDIR /root
-RUN wget https://curl.se/download/curl-7.45.0.tar.gz
-RUN tar -zxvf curl-7.45.0.tar.gz
-WORKDIR /root/curl-7.45.0
-RUN ./configure --with-ssl --with-nghttp2=/usr/local
-RUN make
-RUN make install
-RUN ldconfig
-
-CMD ["-h"]
-ENTRYPOINT ["/usr/local/bin/curl"]
-
vendor/golang.org/x/net/http2/Makefile | 3 (generated, vendored)
@@ -1,3 +0,0 @@
-curlimage:
-	docker build -t gohttp2/curl .
-
vendor/golang.org/x/net/http2/databuffer.go | 59 (generated, vendored)
@@ -20,41 +20,44 @@ import (
 // TODO: Benchmark to determine if the pools are necessary. The GC may have
 // improved enough that we can instead allocate chunks like this:
 // make([]byte, max(16<<10, expectedBytesRemaining))
-var (
-	dataChunkSizeClasses = []int{
-		1 << 10,
-		2 << 10,
-		4 << 10,
-		8 << 10,
-		16 << 10,
-	}
-	dataChunkPools = [...]sync.Pool{
-		{New: func() interface{} { return make([]byte, 1<<10) }},
-		{New: func() interface{} { return make([]byte, 2<<10) }},
-		{New: func() interface{} { return make([]byte, 4<<10) }},
-		{New: func() interface{} { return make([]byte, 8<<10) }},
-		{New: func() interface{} { return make([]byte, 16<<10) }},
-	}
-)
+var dataChunkPools = [...]sync.Pool{
+	{New: func() interface{} { return new([1 << 10]byte) }},
+	{New: func() interface{} { return new([2 << 10]byte) }},
+	{New: func() interface{} { return new([4 << 10]byte) }},
+	{New: func() interface{} { return new([8 << 10]byte) }},
+	{New: func() interface{} { return new([16 << 10]byte) }},
+}
 
 func getDataBufferChunk(size int64) []byte {
-	i := 0
-	for ; i < len(dataChunkSizeClasses)-1; i++ {
-		if size <= int64(dataChunkSizeClasses[i]) {
-			break
-		}
+	switch {
+	case size <= 1<<10:
+		return dataChunkPools[0].Get().(*[1 << 10]byte)[:]
+	case size <= 2<<10:
+		return dataChunkPools[1].Get().(*[2 << 10]byte)[:]
+	case size <= 4<<10:
+		return dataChunkPools[2].Get().(*[4 << 10]byte)[:]
+	case size <= 8<<10:
+		return dataChunkPools[3].Get().(*[8 << 10]byte)[:]
+	default:
+		return dataChunkPools[4].Get().(*[16 << 10]byte)[:]
 	}
-	return dataChunkPools[i].Get().([]byte)
 }
 
 func putDataBufferChunk(p []byte) {
-	for i, n := range dataChunkSizeClasses {
-		if len(p) == n {
-			dataChunkPools[i].Put(p)
-			return
-		}
+	switch len(p) {
+	case 1 << 10:
+		dataChunkPools[0].Put((*[1 << 10]byte)(p))
+	case 2 << 10:
+		dataChunkPools[1].Put((*[2 << 10]byte)(p))
+	case 4 << 10:
+		dataChunkPools[2].Put((*[4 << 10]byte)(p))
+	case 8 << 10:
+		dataChunkPools[3].Put((*[8 << 10]byte)(p))
+	case 16 << 10:
+		dataChunkPools[4].Put((*[16 << 10]byte)(p))
+	default:
+		panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
 	}
-	panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
 }
 
 // dataBuffer is an io.ReadWriter backed by a list of data chunks.
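The rewrite above keeps the five size classes but stores *[N]byte array pointers in the pools instead of []byte slices: putting a pointer into an interface{} does not allocate, while boxing a slice header does, and Go 1.17's slice-to-array-pointer conversion makes the round trip free. A standalone sketch of the same round trip (not the vendored code):

package main

import (
	"fmt"
	"sync"
)

// Pool entries are *[1024]byte, not []byte: a pointer stored in an
// interface{} does not allocate, while a slice header would.
var pool = sync.Pool{
	New: func() interface{} { return new([1024]byte) },
}

func getChunk() []byte {
	return pool.Get().(*[1024]byte)[:] // array pointer -> slice, no copy
}

func putChunk(p []byte) {
	if len(p) != 1024 {
		panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
	}
	pool.Put((*[1024]byte)(p)) // slice -> array pointer (Go 1.17+), no copy
}

func main() {
	b := getChunk()
	b[0] = 'x'
	putChunk(b)
	fmt.Println("chunk recycled, len =", len(b))
}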
vendor/golang.org/x/net/http2/frame.go | 51 (generated, vendored)
@@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool {
 // returned error is ErrFrameTooLarge. Other errors may be of type
 // ConnectionError, StreamError, or anything else from the underlying
 // reader.
+//
+// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
+// indicates the stream responsible for the error.
 func (fr *Framer) ReadFrame() (Frame, error) {
 	fr.errDetail = nil
 	if fr.lastFrame != nil {
@@ -1510,19 +1513,18 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
 }
 
 func (fr *Framer) maxHeaderStringLen() int {
-	v := fr.maxHeaderListSize()
-	if uint32(int(v)) == v {
-		return int(v)
+	v := int(fr.maxHeaderListSize())
+	if v < 0 {
+		// If maxHeaderListSize overflows an int, use no limit (0).
+		return 0
 	}
-	// They had a crazy big number for MaxHeaderBytes anyway,
-	// so give them unlimited header lengths:
-	return 0
+	return v
 }
 
 // readMetaFrame returns 0 or more CONTINUATION frames from fr and
 // merge them into the provided hf and returns a MetaHeadersFrame
 // with the decoded hpack values.
-func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) {
 	if fr.AllowIllegalReads {
 		return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
 	}
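The new maxHeaderStringLen detects overflow by converting the uint32 limit to int and checking for a negative result, which can only happen where int is 32 bits wide. A standalone sketch of the same check:

package main

import (
	"fmt"
	"math"
)

// capToInt mirrors the overflow check in maxHeaderStringLen above:
// converting a uint32 to int and testing for a negative result detects
// overflow on 32-bit platforms.
func capToInt(v uint32) int {
	n := int(v)
	if n < 0 {
		return 0 // 0 means "no limit" in the Framer's convention
	}
	return n
}

func main() {
	fmt.Println(capToInt(16 << 10))       // 16384
	fmt.Println(capToInt(math.MaxUint32)) // 0 on 32-bit platforms, 4294967295 on 64-bit
}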
@@ -1565,6 +1567,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
 		if size > remainSize {
 			hdec.SetEmitEnabled(false)
 			mh.Truncated = true
+			remainSize = 0
 			return
 		}
 		remainSize -= size
@@ -1577,8 +1580,38 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
 	var hc headersOrContinuation = hf
 	for {
 		frag := hc.HeaderBlockFragment()
+
+		// Avoid parsing large amounts of headers that we will then discard.
+		// If the sender exceeds the max header list size by too much,
+		// skip parsing the fragment and close the connection.
+		//
+		// "Too much" is either any CONTINUATION frame after we've already
+		// exceeded the max header list size (in which case remainSize is 0),
+		// or a frame whose encoded size is more than twice the remaining
+		// header list bytes we're willing to accept.
+		if int64(len(frag)) > int64(2*remainSize) {
+			if VerboseLogs {
+				log.Printf("http2: header list too large")
+			}
+			// It would be nice to send a RST_STREAM before sending the GOAWAY,
+			// but the structure of the server's frame writer makes this difficult.
+			return mh, ConnectionError(ErrCodeProtocol)
+		}
+
+		// Also close the connection after any CONTINUATION frame following an
+		// invalid header, since we stop tracking the size of the headers after
+		// an invalid one.
+		if invalid != nil {
+			if VerboseLogs {
+				log.Printf("http2: invalid header: %v", invalid)
+			}
+			// It would be nice to send a RST_STREAM before sending the GOAWAY,
+			// but the structure of the server's frame writer makes this difficult.
+			return mh, ConnectionError(ErrCodeProtocol)
+		}
+
 		if _, err := hdec.Write(frag); err != nil {
-			return nil, ConnectionError(ErrCodeCompression)
+			return mh, ConnectionError(ErrCodeCompression)
 		}
 
 		if hc.HeadersEnded() {
@@ -1595,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
 	mh.HeadersFrame.invalidate()
 
 	if err := hdec.Close(); err != nil {
-		return nil, ConnectionError(ErrCodeCompression)
+		return mh, ConnectionError(ErrCodeCompression)
 	}
 	if invalid != nil {
 		fr.errDetail = invalid
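These hunks appear to be the defense against excessive CONTINUATION frames: a header block fragment is rejected once its encoded size exceeds twice the remaining header-list budget, and any fragment after an invalid header closes the connection. The budget test in isolation (a sketch, not the vendored code):

package main

import "fmt"

// acceptFragment mirrors the check added above: a fragment is rejected once
// it is more than twice the header-list bytes we are still willing to accept.
// Since remainSize is forced to 0 when the limit is first exceeded, any
// further CONTINUATION fragment is rejected outright.
func acceptFragment(fragLen, remainSize int) bool {
	return int64(fragLen) <= int64(2*remainSize)
}

func main() {
	fmt.Println(acceptFragment(1024, 4096)) // true: well within budget
	fmt.Println(acceptFragment(9000, 4096)) // false: more than twice the budget
	fmt.Println(acceptFragment(1, 0))       // false: budget already exhausted
}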
vendor/golang.org/x/net/http2/go111.go | 30 (generated, vendored)
@@ -1,30 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.11
-// +build go1.11
-
-package http2
-
-import (
-	"net/http/httptrace"
-	"net/textproto"
-)
-
-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
-	return trace != nil && trace.WroteHeaderField != nil
-}
-
-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
-	if trace != nil && trace.WroteHeaderField != nil {
-		trace.WroteHeaderField(k, []string{v})
-	}
-}
-
-func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
-	if trace != nil {
-		return trace.Got1xxResponse
-	}
-	return nil
-}
vendor/golang.org/x/net/http2/go115.go | 27 (generated, vendored)
@@ -1,27 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.15
-// +build go1.15
-
-package http2
-
-import (
-	"context"
-	"crypto/tls"
-)
-
-// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
-// connection.
-func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
-	dialer := &tls.Dialer{
-		Config: cfg,
-	}
-	cn, err := dialer.DialContext(ctx, network, addr)
-	if err != nil {
-		return nil, err
-	}
-	tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
-	return tlsCn, nil
-}
vendor/golang.org/x/net/http2/go118.go | 17 (generated, vendored)
@@ -1,17 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package http2
-
-import (
-	"crypto/tls"
-	"net"
-)
-
-func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
-	return tc.NetConn()
-}
vendor/golang.org/x/net/http2/not_go111.go | 21 (generated, vendored)
@@ -1,21 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.11
-// +build !go1.11
-
-package http2
-
-import (
-	"net/http/httptrace"
-	"net/textproto"
-)
-
-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
-
-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
-
-func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
-	return nil
-}
vendor/golang.org/x/net/http2/not_go115.go | 31 (generated, vendored)
@@ -1,31 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.15
-// +build !go1.15
-
-package http2
-
-import (
-	"context"
-	"crypto/tls"
-)
-
-// dialTLSWithContext opens a TLS connection.
-func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
-	cn, err := tls.Dial(network, addr, cfg)
-	if err != nil {
-		return nil, err
-	}
-	if err := cn.Handshake(); err != nil {
-		return nil, err
-	}
-	if cfg.InsecureSkipVerify {
-		return cn, nil
-	}
-	if err := cn.VerifyHostname(cfg.ServerName); err != nil {
-		return nil, err
-	}
-	return cn, nil
-}
vendor/golang.org/x/net/http2/not_go118.go | 17 (generated, vendored)
@@ -1,17 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.18
-// +build !go1.18
-
-package http2
-
-import (
-	"crypto/tls"
-	"net"
-)
-
-func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
-	return nil
-}
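The six files deleted above are go:build version shims: each feature had a go1.N implementation and a !go1.N fallback, selected at build time. With the module now requiring go1.22, the gated APIs (tls.Dialer, tls.Conn.NetConn, the httptrace fields) are always available, so the modern bodies move unconditionally into transport.go at the end of this diff. A generic sketch of the retired pattern (file, package, and function names are illustrative):

// feature_go118.go
//go:build go1.18

package example

import (
	"crypto/tls"
	"net"
)

// On Go 1.18+ the real accessor is available.
func underlyingConn(tc *tls.Conn) net.Conn { return tc.NetConn() }

// feature_not_go118.go
//go:build !go1.18

package example

import (
	"crypto/tls"
	"net"
)

// Older toolchains compile this stub instead; callers must tolerate nil.
func underlyingConn(tc *tls.Conn) net.Conn { return nil }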
vendor/golang.org/x/net/http2/pipe.go | 11 (generated, vendored)
@@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) {
 	}
 }
 
-var errClosedPipeWrite = errors.New("write on closed buffer")
+var (
+	errClosedPipeWrite        = errors.New("write on closed buffer")
+	errUninitializedPipeWrite = errors.New("write on uninitialized buffer")
+)
 
 // Write copies bytes from p into the buffer and wakes a reader.
 // It is an error to write more data than the buffer can hold.
@@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) {
 	if p.err != nil || p.breakErr != nil {
 		return 0, errClosedPipeWrite
 	}
+	// pipe.setBuffer is never invoked, leaving the buffer uninitialized.
+	// We shouldn't try to write to an uninitialized pipe,
+	// but returning an error is better than panicking.
+	if p.b == nil {
+		return 0, errUninitializedPipeWrite
+	}
 	return p.b.Write(d)
 }
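The added guard turns a write to a never-initialized pipe into an error instead of a nil-pointer panic. The same defensive pattern in a standalone sketch (names are illustrative):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

var errUninitialized = errors.New("write on uninitialized buffer")

type pipe struct {
	b *bytes.Buffer // nil until a buffer is attached
}

// Write refuses a nil buffer instead of panicking, mirroring the guard above.
func (p *pipe) Write(d []byte) (int, error) {
	if p.b == nil {
		return 0, errUninitialized
	}
	return p.b.Write(d)
}

func main() {
	var p pipe
	_, err := p.Write([]byte("hello"))
	fmt.Println(err) // "write on uninitialized buffer", rather than a panic
}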
vendor/golang.org/x/net/http2/server.go | 134 (generated, vendored)
@@ -124,6 +124,7 @@ type Server struct {
 	// IdleTimeout specifies how long until idle clients should be
 	// closed with a GOAWAY frame. PING frames are not considered
 	// activity for the purposes of IdleTimeout.
+	// If zero or negative, there is no timeout.
 	IdleTimeout time.Duration
 
 	// MaxUploadBufferPerConnection is the size of the initial flow
@@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 	// passes the connection off to us with the deadline already set.
 	// Write deadlines are set per stream in serverConn.newStream.
 	// Disarm the net.Conn write deadline here.
-	if sc.hs.WriteTimeout != 0 {
+	if sc.hs.WriteTimeout > 0 {
 		sc.conn.SetWriteDeadline(time.Time{})
 	}
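This hunk, and the matching ones further down, replace `!= 0` with `> 0` so a negative timeout now means "no timeout" instead of arming a deadline, in line with the new IdleTimeout doc comment above. A sketch of that convention on a plain net.Conn:

package main

import (
	"net"
	"time"
)

// armReadDeadline applies the "> 0" convention from the hunks above:
// zero or negative durations leave the connection without a deadline.
func armReadDeadline(c net.Conn, d time.Duration) {
	if d > 0 {
		c.SetReadDeadline(time.Now().Add(d))
		return
	}
	c.SetReadDeadline(time.Time{}) // the zero time clears any deadline
}

func main() {
	a, b := net.Pipe()
	defer a.Close()
	defer b.Close()
	armReadDeadline(a, 5*time.Second) // deadline armed
	armReadDeadline(a, -1)            // deadline cleared: negative means "no timeout"
}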
@@ -581,9 +582,11 @@ type serverConn struct {
 	advMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
 	curClientStreams            uint32 // number of open streams initiated by the client
 	curPushedStreams            uint32 // number of open streams initiated by server push
+	curHandlers                 uint32 // number of running handler goroutines
 	maxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests
 	maxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes
 	streams                     map[uint32]*stream
+	unstartedHandlers           []unstartedHandler
 	initialStreamSendWindowSize int32
 	maxFrameSize                int32
 	peerMaxHeaderListSize       uint32 // zero means unknown (default)
@@ -729,11 +732,7 @@ func isClosedConnError(err error) bool {
 		return false
 	}
 
-	// TODO: remove this string search and be more like the Windows
-	// case below. That might involve modifying the standard library
-	// to return better error types.
-	str := err.Error()
-	if strings.Contains(str, "use of closed network connection") {
+	if errors.Is(err, net.ErrClosed) {
 		return true
 	}
@@ -922,7 +921,7 @@ func (sc *serverConn) serve() {
 	sc.setConnState(http.StateActive)
 	sc.setConnState(http.StateIdle)
 
-	if sc.srv.IdleTimeout != 0 {
+	if sc.srv.IdleTimeout > 0 {
 		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
 		defer sc.idleTimer.Stop()
 	}
@@ -981,6 +980,8 @@ func (sc *serverConn) serve() {
 				return
 			case gracefulShutdownMsg:
 				sc.startGracefulShutdownInternal()
+			case handlerDoneMsg:
+				sc.handlerDone()
 			default:
 				panic("unknown timer")
 			}
@@ -1012,14 +1013,6 @@ func (sc *serverConn) serve() {
 	}
 }
 
-func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
-	select {
-	case <-sc.doneServing:
-	case <-sharedCh:
-		close(privateCh)
-	}
-}
-
 type serverMessage int
 
 // Message values sent to serveMsgCh.
@@ -1028,6 +1021,7 @@ var (
 	idleTimerMsg        = new(serverMessage)
 	shutdownTimerMsg    = new(serverMessage)
 	gracefulShutdownMsg = new(serverMessage)
+	handlerDoneMsg      = new(serverMessage)
 )
 
 func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
@@ -1484,6 +1478,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
 		sc.goAway(ErrCodeFlowControl)
 		return true
 	case ConnectionError:
+		if res.f != nil {
+			if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
+				sc.maxClientStreamID = id
+			}
+		}
 		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
 		sc.goAway(ErrCode(ev))
 		return true // goAway will handle shutdown
@@ -1640,7 +1639,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
 	delete(sc.streams, st.id)
 	if len(sc.streams) == 0 {
 		sc.setConnState(http.StateIdle)
-		if sc.srv.IdleTimeout != 0 {
+		if sc.srv.IdleTimeout > 0 {
 			sc.idleTimer.Reset(sc.srv.IdleTimeout)
 		}
 		if h1ServerKeepAlivesDisabled(sc.hs) {
@@ -1900,9 +1899,11 @@ func (st *stream) copyTrailersToHandlerRequest() {
 // onReadTimeout is run on its own goroutine (from time.AfterFunc)
 // when the stream's ReadTimeout has fired.
 func (st *stream) onReadTimeout() {
-	// Wrap the ErrDeadlineExceeded to avoid callers depending on us
-	// returning the bare error.
-	st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+	if st.body != nil {
+		// Wrap the ErrDeadlineExceeded to avoid callers depending on us
+		// returning the bare error.
+		st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+	}
 }
 
 // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
@@ -2018,15 +2019,12 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	// similar to how the http1 server works. Here it's
 	// technically more like the http1 Server's ReadHeaderTimeout
 	// (in Go 1.8), though. That's a more sane option anyway.
-	if sc.hs.ReadTimeout != 0 {
+	if sc.hs.ReadTimeout > 0 {
 		sc.conn.SetReadDeadline(time.Time{})
-		if st.body != nil {
-			st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
-		}
+		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
 	}
 
-	go sc.runHandler(rw, req, handler)
-	return nil
+	return sc.scheduleHandler(id, rw, req, handler)
 }
 
 func (sc *serverConn) upgradeRequest(req *http.Request) {
@@ -2042,10 +2040,14 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
 
 	// Disable any read deadline set by the net/http package
 	// prior to the upgrade.
-	if sc.hs.ReadTimeout != 0 {
+	if sc.hs.ReadTimeout > 0 {
 		sc.conn.SetReadDeadline(time.Time{})
 	}
 
+	// This is the first request on the connection,
+	// so start the handler directly rather than going
+	// through scheduleHandler.
+	sc.curHandlers++
 	go sc.runHandler(rw, req, sc.handler.ServeHTTP)
 }
@@ -2116,7 +2118,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
 	st.flow.conn = &sc.flow // link to conn-level counter
 	st.flow.add(sc.initialStreamSendWindowSize)
 	st.inflow.init(sc.srv.initialStreamRecvWindowSize())
-	if sc.hs.WriteTimeout != 0 {
+	if sc.hs.WriteTimeout > 0 {
 		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
 	}
@@ -2286,8 +2288,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response
 	return &responseWriter{rws: rws}
 }
 
+type unstartedHandler struct {
+	streamID uint32
+	rw       *responseWriter
+	req      *http.Request
+	handler  func(http.ResponseWriter, *http.Request)
+}
+
+// scheduleHandler starts a handler goroutine,
+// or schedules one to start as soon as an existing handler finishes.
+func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
+	sc.serveG.check()
+	maxHandlers := sc.advMaxStreams
+	if sc.curHandlers < maxHandlers {
+		sc.curHandlers++
+		go sc.runHandler(rw, req, handler)
+		return nil
+	}
+	if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
+		return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
+	}
+	sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
+		streamID: streamID,
+		rw:       rw,
+		req:      req,
+		handler:  handler,
+	})
+	return nil
+}
+
+func (sc *serverConn) handlerDone() {
+	sc.serveG.check()
+	sc.curHandlers--
+	i := 0
+	maxHandlers := sc.advMaxStreams
+	for ; i < len(sc.unstartedHandlers); i++ {
+		u := sc.unstartedHandlers[i]
+		if sc.streams[u.streamID] == nil {
+			// This stream was reset before its goroutine had a chance to start.
+			continue
+		}
+		if sc.curHandlers >= maxHandlers {
+			break
+		}
+		sc.curHandlers++
+		go sc.runHandler(u.rw, u.req, u.handler)
+		sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
+	}
+	sc.unstartedHandlers = sc.unstartedHandlers[i:]
+	if len(sc.unstartedHandlers) == 0 {
+		sc.unstartedHandlers = nil
+	}
+}
+
 // Run on its own goroutine.
 func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+	defer sc.sendServeMsg(handlerDoneMsg)
 	didPanic := true
 	defer func() {
 		rw.rws.stream.cancelCtx()
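scheduleHandler and handlerDone cap concurrently running handlers at the advertised SETTINGS_MAX_CONCURRENT_STREAMS and queue at most four times that many, returning ENHANCE_YOUR_CALM beyond that; this bounds the work a client can trigger by opening and immediately resetting streams. The bookkeeping in miniature (a synchronous toy model, not the vendored code):

package main

import (
	"errors"
	"fmt"
)

// scheduler is a stripped-down model of the logic above:
// run up to max tasks at once, queue up to 4*max more, reject beyond that.
type scheduler struct {
	max     int
	running int
	queue   []func()
}

var errTooManyQueued = errors.New("too many queued tasks (ENHANCE_YOUR_CALM)")

func (s *scheduler) schedule(task func()) error {
	if s.running < s.max {
		s.running++
		task()
		return nil
	}
	if len(s.queue) > 4*s.max {
		return errTooManyQueued
	}
	s.queue = append(s.queue, task)
	return nil
}

// done mirrors handlerDone: free a slot, then drain the queue while slots last.
func (s *scheduler) done() {
	s.running--
	for len(s.queue) > 0 && s.running < s.max {
		task := s.queue[0]
		s.queue = s.queue[1:]
		s.running++
		task()
	}
}

func main() {
	s := &scheduler{max: 2}
	for i := 0; i < 3; i++ {
		i := i
		err := s.schedule(func() { fmt.Println("running task", i) })
		fmt.Println("schedule:", err)
	}
	s.done() // frees a slot; the queued third task now runs
}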
@@ -2495,7 +2551,6 @@ type responseWriterState struct {
 	wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
 	sentHeader  bool // have we sent the header frame?
 	handlerDone bool // handler has finished
-	dirty       bool // a Write failed; don't reuse this responseWriterState
 
 	sentContentLen int64 // non-zero if handler set a Content-Length header
 	wroteBytes     int64
@@ -2615,7 +2670,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 			date:          date,
 		})
 		if err != nil {
-			rws.dirty = true
 			return 0, err
 		}
 		if endStream {
@@ -2636,7 +2690,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 	if len(p) > 0 || endStream {
 		// only send a 0 byte DATA frame if we're ending the stream.
 		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
-			rws.dirty = true
 			return 0, err
 		}
 	}
@@ -2648,9 +2701,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 			trailers:  rws.trailers,
 			endStream: true,
 		})
-		if err != nil {
-			rws.dirty = true
-		}
 		return len(p), err
 	}
 	return len(p), nil
@@ -2866,14 +2916,12 @@ func (rws *responseWriterState) writeHeader(code int) {
 			h.Del("Transfer-Encoding")
 		}
 
-		if rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+		rws.conn.writeHeaders(rws.stream, &writeResHeaders{
 			streamID:    rws.stream.id,
 			httpResCode: code,
 			h:           h,
 			endStream:   rws.handlerDone && !rws.hasTrailers(),
-		}) != nil {
-			rws.dirty = true
-		}
+		})
 
 		return
 	}
@@ -2938,19 +2986,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
 
 func (w *responseWriter) handlerDone() {
 	rws := w.rws
-	dirty := rws.dirty
 	rws.handlerDone = true
 	w.Flush()
 	w.rws = nil
-	if !dirty {
-		// Only recycle the pool if all prior Write calls to
-		// the serverConn goroutine completed successfully. If
-		// they returned earlier due to resets from the peer
-		// there might still be write goroutines outstanding
-		// from the serverConn referencing the rws memory. See
-		// issue 20704.
-		responseWriterStatePool.Put(rws)
-	}
+	responseWriterStatePool.Put(rws)
 }
 
 // Push errors.
@@ -3133,6 +3172,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
 		panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
 	}
 
+	sc.curHandlers++
 	go sc.runHandler(rw, req, sc.handler.ServeHTTP)
 	return promisedID, nil
 }
vendor/golang.org/x/net/http2/testsync.go | 331 (generated, vendored, new file)
@@ -0,0 +1,331 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package http2
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+// testSyncHooks coordinates goroutines in tests.
+//
+// For example, a call to ClientConn.RoundTrip involves several goroutines, including:
+//   - the goroutine running RoundTrip;
+//   - the clientStream.doRequest goroutine, which writes the request; and
+//   - the clientStream.readLoop goroutine, which reads the response.
+//
+// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines
+// are blocked waiting for some condition such as reading the Request.Body or waiting for
+// flow control to become available.
+//
+// The testSyncHooks also manage timers and synthetic time in tests.
+// This permits us to, for example, start a request and cause it to time out waiting for
+// response headers without resorting to time.Sleep calls.
+type testSyncHooks struct {
+	// active/inactive act as a mutex and condition variable.
+	//
+	//   - neither chan contains a value: testSyncHooks is locked.
+	//   - active contains a value: unlocked, and at least one goroutine is not blocked
+	//   - inactive contains a value: unlocked, and all goroutines are blocked
+	active   chan struct{}
+	inactive chan struct{}
+
+	// goroutine counts
+	total    int                     // total goroutines
+	condwait map[*sync.Cond]int      // blocked in sync.Cond.Wait
+	blocked  []*testBlockedGoroutine // otherwise blocked
+
+	// fake time
+	now    time.Time
+	timers []*fakeTimer
+
+	// Transport testing: Report various events.
+	newclientconn func(*ClientConn)
+	newstream     func(*clientStream)
+}
+
+// testBlockedGoroutine is a blocked goroutine.
+type testBlockedGoroutine struct {
+	f  func() bool   // blocked until f returns true
+	ch chan struct{} // closed when unblocked
+}
+
+func newTestSyncHooks() *testSyncHooks {
+	h := &testSyncHooks{
+		active:   make(chan struct{}, 1),
+		inactive: make(chan struct{}, 1),
+		condwait: map[*sync.Cond]int{},
+	}
+	h.inactive <- struct{}{}
+	h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
+	return h
+}
+
+// lock acquires the testSyncHooks mutex.
+func (h *testSyncHooks) lock() {
+	select {
+	case <-h.active:
+	case <-h.inactive:
+	}
+}
+
+// waitInactive waits for all goroutines to become inactive.
+func (h *testSyncHooks) waitInactive() {
+	for {
+		<-h.inactive
+		if !h.unlock() {
+			break
+		}
+	}
+}
+
+// unlock releases the testSyncHooks mutex.
+// It reports whether any goroutines are active.
+func (h *testSyncHooks) unlock() (active bool) {
+	// Look for a blocked goroutine which can be unblocked.
+	blocked := h.blocked[:0]
+	unblocked := false
+	for _, b := range h.blocked {
+		if !unblocked && b.f() {
+			unblocked = true
+			close(b.ch)
+		} else {
+			blocked = append(blocked, b)
+		}
+	}
+	h.blocked = blocked
+
+	// Count goroutines blocked on condition variables.
+	condwait := 0
+	for _, count := range h.condwait {
+		condwait += count
+	}
+
+	if h.total > condwait+len(blocked) {
+		h.active <- struct{}{}
+		return true
+	} else {
+		h.inactive <- struct{}{}
+		return false
+	}
+}
+
+// goRun starts a new goroutine.
+func (h *testSyncHooks) goRun(f func()) {
+	h.lock()
+	h.total++
+	h.unlock()
+	go func() {
+		defer func() {
+			h.lock()
+			h.total--
+			h.unlock()
+		}()
+		f()
+	}()
+}
+
+// blockUntil indicates that a goroutine is blocked waiting for some condition to become true.
+// It waits until f returns true before proceeding.
+//
+// Example usage:
+//
+//	h.blockUntil(func() bool {
+//		// Is the context done yet?
+//		select {
+//		case <-ctx.Done():
+//		default:
+//			return false
+//		}
+//		return true
+//	})
+//	// Wait for the context to become done.
+//	<-ctx.Done()
+//
+// The function f passed to blockUntil must be non-blocking and idempotent.
+func (h *testSyncHooks) blockUntil(f func() bool) {
+	if f() {
+		return
+	}
+	ch := make(chan struct{})
+	h.lock()
+	h.blocked = append(h.blocked, &testBlockedGoroutine{
+		f:  f,
+		ch: ch,
+	})
+	h.unlock()
+	<-ch
+}
+
+// broadcast is sync.Cond.Broadcast.
+func (h *testSyncHooks) condBroadcast(cond *sync.Cond) {
+	h.lock()
+	delete(h.condwait, cond)
+	h.unlock()
+	cond.Broadcast()
+}
+
+// broadcast is sync.Cond.Wait.
+func (h *testSyncHooks) condWait(cond *sync.Cond) {
+	h.lock()
+	h.condwait[cond]++
+	h.unlock()
+}
+
+// newTimer creates a new fake timer.
+func (h *testSyncHooks) newTimer(d time.Duration) timer {
+	h.lock()
+	defer h.unlock()
+	t := &fakeTimer{
+		hooks: h,
+		when:  h.now.Add(d),
+		c:     make(chan time.Time),
+	}
+	h.timers = append(h.timers, t)
+	return t
+}
+
+// afterFunc creates a new fake AfterFunc timer.
+func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer {
+	h.lock()
+	defer h.unlock()
+	t := &fakeTimer{
+		hooks: h,
+		when:  h.now.Add(d),
+		f:     f,
+	}
+	h.timers = append(h.timers, t)
+	return t
+}
+
+func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+	ctx, cancel := context.WithCancel(ctx)
+	t := h.afterFunc(d, cancel)
+	return ctx, func() {
+		t.Stop()
+		cancel()
+	}
+}
+
+func (h *testSyncHooks) timeUntilEvent() time.Duration {
+	h.lock()
+	defer h.unlock()
+	var next time.Time
+	for _, t := range h.timers {
+		if next.IsZero() || t.when.Before(next) {
+			next = t.when
+		}
+	}
+	if d := next.Sub(h.now); d > 0 {
+		return d
+	}
+	return 0
+}
+
+// advance advances time and causes synthetic timers to fire.
+func (h *testSyncHooks) advance(d time.Duration) {
+	h.lock()
+	defer h.unlock()
+	h.now = h.now.Add(d)
+	timers := h.timers[:0]
+	for _, t := range h.timers {
+		t := t // remove after go.mod depends on go1.22
+		t.mu.Lock()
+		switch {
+		case t.when.After(h.now):
+			timers = append(timers, t)
+		case t.when.IsZero():
+			// stopped timer
+		default:
+			t.when = time.Time{}
+			if t.c != nil {
+				close(t.c)
+			}
+			if t.f != nil {
+				h.total++
+				go func() {
+					defer func() {
+						h.lock()
+						h.total--
+						h.unlock()
+					}()
+					t.f()
+				}()
+			}
+		}
+		t.mu.Unlock()
+	}
+	h.timers = timers
+}
+
+// A timer wraps a time.Timer, or a synthetic equivalent in tests.
+// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires.
+type timer interface {
+	C() <-chan time.Time
+	Stop() bool
+	Reset(d time.Duration) bool
+}
+
+// timeTimer implements timer using real time.
+type timeTimer struct {
+	t *time.Timer
+	c chan time.Time
+}
+
+// newTimeTimer creates a new timer using real time.
+func newTimeTimer(d time.Duration) timer {
+	ch := make(chan time.Time)
+	t := time.AfterFunc(d, func() {
+		close(ch)
+	})
+	return &timeTimer{t, ch}
+}
+
+// newTimeAfterFunc creates an AfterFunc timer using real time.
+func newTimeAfterFunc(d time.Duration, f func()) timer {
+	return &timeTimer{
+		t: time.AfterFunc(d, f),
+	}
+}
+
+func (t timeTimer) C() <-chan time.Time        { return t.c }
+func (t timeTimer) Stop() bool                 { return t.t.Stop() }
+func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) }
+
+// fakeTimer implements timer using fake time.
+type fakeTimer struct {
+	hooks *testSyncHooks
+
+	mu   sync.Mutex
+	when time.Time      // when the timer will fire
+	c    chan time.Time // closed when the timer fires; mutually exclusive with f
+	f    func()         // called when the timer fires; mutually exclusive with c
+}
+
+func (t *fakeTimer) C() <-chan time.Time { return t.c }
+
+func (t *fakeTimer) Stop() bool {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	stopped := t.when.IsZero()
+	t.when = time.Time{}
+	return stopped
+}
+
+func (t *fakeTimer) Reset(d time.Duration) bool {
+	if t.c != nil || t.f == nil {
+		panic("fakeTimer only supports Reset on AfterFunc timers")
+	}
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.hooks.lock()
+	defer t.hooks.unlock()
+	active := !t.when.IsZero()
+	t.when = t.hooks.now.Add(d)
+	if !active {
+		t.hooks.timers = append(t.hooks.timers, t)
+	}
+	return active
+}
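testsync.go builds tests around synthetic time: timers record an absolute deadline and fire when a test advances a fake clock, so no test sleeps on the wall clock. The core idea in a standalone sketch (a toy model, not the vendored code):

package main

import (
	"fmt"
	"time"
)

// fakeClock holds timers with absolute deadlines; advance fires the due ones.
type fakeClock struct {
	now    time.Time
	timers []*fakeTimer
}

type fakeTimer struct {
	when time.Time
	f    func()
}

func (c *fakeClock) afterFunc(d time.Duration, f func()) {
	c.timers = append(c.timers, &fakeTimer{when: c.now.Add(d), f: f})
}

// advance moves synthetic time forward and runs every timer that came due.
func (c *fakeClock) advance(d time.Duration) {
	c.now = c.now.Add(d)
	remaining := c.timers[:0]
	for _, t := range c.timers {
		if t.when.After(c.now) {
			remaining = append(remaining, t)
			continue
		}
		t.f()
	}
	c.timers = remaining
}

func main() {
	c := &fakeClock{now: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)}
	c.afterFunc(5*time.Second, func() { fmt.Println("timer fired") })
	c.advance(3 * time.Second) // nothing: 3s < 5s
	c.advance(2 * time.Second) // prints "timer fired", with no real waiting
}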
vendor/golang.org/x/net/http2/transport.go | 358 (generated, vendored)
@@ -147,6 +147,12 @@ type Transport struct {
 	// waiting for their turn.
 	StrictMaxConcurrentStreams bool
 
+	// IdleConnTimeout is the maximum amount of time an idle
+	// (keep-alive) connection will remain idle before closing
+	// itself.
+	// Zero means no limit.
+	IdleConnTimeout time.Duration
+
 	// ReadIdleTimeout is the timeout after which a health check using ping
 	// frame will be carried out if no frame is received on the connection.
 	// Note that a ping response will is considered a received frame, so if
@@ -178,6 +184,8 @@ type Transport struct {
 
 	connPoolOnce  sync.Once
 	connPoolOrDef ClientConnPool // non-nil version of ConnPool
+
+	syncHooks *testSyncHooks
 }
 
 func (t *Transport) maxHeaderListSize() uint32 {
@@ -291,8 +299,7 @@ func (t *Transport) initConnPool() {
 // HTTP/2 server.
 type ClientConn struct {
 	t *Transport
-	tconn       net.Conn // usually *tls.Conn, except specialized impls
-	tconnClosed bool
+	tconn     net.Conn             // usually *tls.Conn, except specialized impls
 	tlsState  *tls.ConnectionState // nil only for specialized impls
 	reused    uint32               // whether conn is being reused; atomic
 	singleUse bool                 // whether being used for a single http.Request
@@ -303,7 +310,7 @@ type ClientConn struct {
 	readerErr  error // set before readerDone is closed
 
 	idleTimeout time.Duration // or 0 for never
-	idleTimer   *time.Timer
+	idleTimer   timer
 
 	mu   sync.Mutex // guards following
 	cond *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -345,6 +352,60 @@ type ClientConn struct {
 	werr error        // first write error that has occurred
 	hbuf bytes.Buffer // HPACK encoder writes into this
 	henc *hpack.Encoder
+
+	syncHooks *testSyncHooks // can be nil
+}
+
+// Hook points used for testing.
+// Outside of tests, cc.syncHooks is nil and these all have minimal implementations.
+// Inside tests, see the testSyncHooks function docs.
+
+// goRun starts a new goroutine.
+func (cc *ClientConn) goRun(f func()) {
+	if cc.syncHooks != nil {
+		cc.syncHooks.goRun(f)
+		return
+	}
+	go f()
+}
+
+// condBroadcast is cc.cond.Broadcast.
+func (cc *ClientConn) condBroadcast() {
+	if cc.syncHooks != nil {
+		cc.syncHooks.condBroadcast(cc.cond)
+	}
+	cc.cond.Broadcast()
+}
+
+// condWait is cc.cond.Wait.
+func (cc *ClientConn) condWait() {
+	if cc.syncHooks != nil {
+		cc.syncHooks.condWait(cc.cond)
+	}
+	cc.cond.Wait()
+}
+
+// newTimer creates a new time.Timer, or a synthetic timer in tests.
+func (cc *ClientConn) newTimer(d time.Duration) timer {
+	if cc.syncHooks != nil {
+		return cc.syncHooks.newTimer(d)
+	}
+	return newTimeTimer(d)
+}
+
+// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
+func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer {
+	if cc.syncHooks != nil {
+		return cc.syncHooks.afterFunc(d, f)
+	}
+	return newTimeAfterFunc(d, f)
+}
+
+func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+	if cc.syncHooks != nil {
+		return cc.syncHooks.contextWithTimeout(ctx, d)
+	}
+	return context.WithTimeout(ctx, d)
 }
 
 // clientStream is the state for a single HTTP/2 stream. One of these
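Each hook method checks cc.syncHooks once and falls back to the real primitive when it is nil, so production connections pay only a nil check. The pattern in miniature (names are illustrative, not part of the vendored API):

package main

import (
	"fmt"
	"time"
)

// testHooks lets tests substitute timing primitives.
type testHooks struct {
	afterFunc func(time.Duration, func())
}

type conn struct {
	hooks *testHooks // nil outside of tests
}

func (c *conn) afterFunc(d time.Duration, f func()) {
	if c.hooks != nil {
		c.hooks.afterFunc(d, f) // synthetic timer in tests
		return
	}
	time.AfterFunc(d, f) // real timer in production
}

func main() {
	// A test installs hooks that fire immediately instead of waiting.
	c := &conn{hooks: &testHooks{
		afterFunc: func(d time.Duration, f func()) { f() },
	}}
	c.afterFunc(time.Hour, func() { fmt.Println("fired without waiting an hour") })
}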
@@ -426,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) {
 	// TODO(dneil): Clean up tests where cs.cc.cond is nil.
 	if cs.cc.cond != nil {
 		// Wake up writeRequestBody if it is waiting on flow control.
-		cs.cc.cond.Broadcast()
+		cs.cc.condBroadcast()
 	}
 }
 
@@ -436,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() {
 	defer cc.mu.Unlock()
 	if cs.reqBody != nil && cs.reqBodyClosed == nil {
 		cs.closeReqBodyLocked()
-		cc.cond.Broadcast()
+		cc.condBroadcast()
 	}
 }
 
@@ -446,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() {
 	}
 	cs.reqBodyClosed = make(chan struct{})
 	reqBodyClosed := cs.reqBodyClosed
-	go func() {
+	cs.cc.goRun(func() {
 		cs.reqBody.Close()
 		close(reqBodyClosed)
-	}()
+	})
 }
 
 type stickyErrWriter struct {
@@ -538,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) {
 	return net.JoinHostPort(host, port)
 }
 
-var retryBackoffHook func(time.Duration) *time.Timer
-
-func backoffNewTimer(d time.Duration) *time.Timer {
-	if retryBackoffHook != nil {
-		return retryBackoffHook(d)
-	}
-	return time.NewTimer(d)
-}
-
 // RoundTripOpt is like RoundTrip, but takes options.
 func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
 	if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
@@ -574,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
 			backoff := float64(uint(1) << (uint(retry) - 1))
 			backoff += backoff * (0.1 * mathrand.Float64())
 			d := time.Second * time.Duration(backoff)
-			timer := backoffNewTimer(d)
+			var tm timer
+			if t.syncHooks != nil {
+				tm = t.syncHooks.newTimer(d)
+				t.syncHooks.blockUntil(func() bool {
+					select {
+					case <-tm.C():
+					case <-req.Context().Done():
+					default:
+						return false
+					}
+					return true
+				})
+			} else {
+				tm = newTimeTimer(d)
+			}
 			select {
-			case <-timer.C:
+			case <-tm.C():
 				t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
 				continue
 			case <-req.Context().Done():
-				timer.Stop()
+				tm.Stop()
 				err = req.Context().Err()
 			}
 		}
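The retry delay doubles per attempt, 1<<(retry-1) seconds plus up to 10% additive jitter; note that the float64 to time.Duration conversion truncates the fraction, so for small retry counts the jitter is lost entirely. A standalone check of the arithmetic:

package main

import (
	"fmt"
	mathrand "math/rand"
	"time"
)

// backoffFor reproduces the delay computation from RoundTripOpt above:
// 1s, 2s, 4s, ... doubling per retry, with up to 10% additive jitter.
func backoffFor(retry uint) time.Duration {
	backoff := float64(uint(1) << (retry - 1))
	backoff += backoff * (0.1 * mathrand.Float64())
	return time.Second * time.Duration(backoff)
}

func main() {
	for retry := uint(1); retry <= 4; retry++ {
		fmt.Printf("retry %d: ~%v\n", retry, backoffFor(retry))
	}
	// Prints 1s, 2s, 4s, 8s: the jitter below one whole unit is dropped
	// by the float64 -> time.Duration conversion, exactly as in the original.
}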
@@ -659,6 +725,9 @@ func canRetryError(err error) bool {
 }
 
 func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
+	if t.syncHooks != nil {
+		return t.newClientConn(nil, singleUse, t.syncHooks)
+	}
 	host, _, err := net.SplitHostPort(addr)
 	if err != nil {
 		return nil, err
@@ -667,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
 	if err != nil {
 		return nil, err
 	}
-	return t.newClientConn(tconn, singleUse)
+	return t.newClientConn(tconn, singleUse, nil)
 }
 
 func (t *Transport) newTLSConfig(host string) *tls.Config {
@@ -733,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 {
 }
 
 func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
-	return t.newClientConn(c, t.disableKeepAlives())
+	return t.newClientConn(c, t.disableKeepAlives(), nil)
 }
 
-func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) {
 	cc := &ClientConn{
 		t:     t,
 		tconn: c,
@@ -751,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
 		wantSettingsAck: true,
 		pings:           make(map[[8]byte]chan struct{}),
 		reqHeaderMu:     make(chan struct{}, 1),
+		syncHooks:       hooks,
 	}
+	if hooks != nil {
+		hooks.newclientconn(cc)
+		c = cc.tconn
+	}
 	if d := t.idleConnTimeout(); d != 0 {
 		cc.idleTimeout = d
-		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
+		cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout)
 	}
 	if VerboseLogs {
 		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -819,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
 		return nil, cc.werr
 	}
 
-	go cc.readLoop()
+	cc.goRun(cc.readLoop)
 	return cc, nil
 }
@@ -827,7 +901,7 @@ func (cc *ClientConn) healthCheck() {
 	pingTimeout := cc.t.pingTimeout()
 	// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
 	// trigger the healthCheck again if there is no frame received.
-	ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
+	ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout)
 	defer cancel()
 	cc.vlogf("http2: Transport sending health check")
 	err := cc.Ping(ctx)
@@ -862,7 +936,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
 	}
 	last := f.LastStreamID
 	for streamID, cs := range cc.streams {
-		if streamID > last {
+		if streamID <= last {
+			// The server's GOAWAY indicates that it received this stream.
+			// It will either finish processing it, or close the connection
+			// without doing so. Either way, leave the stream alone for now.
+			continue
+		}
+		if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo {
+			// Don't retry the first stream on a connection if we get a non-NO error.
+			// If the server is sending an error on a new connection,
+			// retrying the request on a new one probably isn't going to work.
+			cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode))
+		} else {
+			// Aborting the stream with errClentConnGotGoAway indicates that
+			// the request should be retried on a new connection.
 			cs.abortStreamLocked(errClientConnGotGoAway)
 		}
 	}
@@ -1019,7 +1106,7 @@ func (cc *ClientConn) forceCloseConn() {
 	if !ok {
 		return
 	}
-	if nc := tlsUnderlyingConn(tc); nc != nil {
+	if nc := tc.NetConn(); nc != nil {
 		nc.Close()
 	}
 }
@@ -1057,7 +1144,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
 	// Wait for all in-flight streams to complete or connection to close
 	done := make(chan struct{})
 	cancelled := false // guarded by cc.mu
-	go func() {
+	cc.goRun(func() {
 		cc.mu.Lock()
 		defer cc.mu.Unlock()
 		for {
@@ -1069,9 +1156,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
 			if cancelled {
 				break
 			}
-			cc.cond.Wait()
+			cc.condWait()
 		}
-	}()
+	})
 	shutdownEnterWaitStateHook()
 	select {
 	case <-done:
@@ -1081,7 +1168,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
 		cc.mu.Lock()
 		// Free the goroutine above
 		cancelled = true
-		cc.cond.Broadcast()
+		cc.condBroadcast()
 		cc.mu.Unlock()
 		return ctx.Err()
 	}
@@ -1119,7 +1206,7 @@ func (cc *ClientConn) closeForError(err error) {
 	for _, cs := range cc.streams {
 		cs.abortStreamLocked(err)
 	}
-	cc.cond.Broadcast()
+	cc.condBroadcast()
 	cc.mu.Unlock()
 	cc.closeConn()
 }
@@ -1216,6 +1303,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() {
 }
 
 func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+	return cc.roundTrip(req, nil)
+}
+
+func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) {
 	ctx := req.Context()
 	cs := &clientStream{
 		cc: cc,
@@ -1230,9 +1321,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
 		respHeaderRecv: make(chan struct{}),
 		donec:          make(chan struct{}),
 	}
-	go cs.doRequest(req)
+	cc.goRun(func() {
+		cs.doRequest(req)
+	})
 
 	waitDone := func() error {
+		if cc.syncHooks != nil {
+			cc.syncHooks.blockUntil(func() bool {
+				select {
+				case <-cs.donec:
+				case <-ctx.Done():
+				case <-cs.reqCancel:
+				default:
+					return false
+				}
+				return true
+			})
+		}
 		select {
 		case <-cs.donec:
 			return nil
@@ -1293,7 +1398,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
 		return err
 	}
 
+	if streamf != nil {
+		streamf(cs)
+	}
+
 	for {
+		if cc.syncHooks != nil {
+			cc.syncHooks.blockUntil(func() bool {
+				select {
+				case <-cs.respHeaderRecv:
+				case <-cs.abort:
+				case <-ctx.Done():
+				case <-cs.reqCancel:
+				default:
+					return false
+				}
+				return true
+			})
+		}
 		select {
 		case <-cs.respHeaderRecv:
 			return handleResponseHeaders()
@@ -1349,6 +1471,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
 	if cc.reqHeaderMu == nil {
 		panic("RoundTrip on uninitialized ClientConn") // for tests
 	}
+	var newStreamHook func(*clientStream)
+	if cc.syncHooks != nil {
+		newStreamHook = cc.syncHooks.newstream
+		cc.syncHooks.blockUntil(func() bool {
+			select {
+			case cc.reqHeaderMu <- struct{}{}:
+				<-cc.reqHeaderMu
+			case <-cs.reqCancel:
+			case <-ctx.Done():
+			default:
+				return false
+			}
+			return true
+		})
+	}
 	select {
 	case cc.reqHeaderMu <- struct{}{}:
 	case <-cs.reqCancel:
@@ -1373,6 +1510,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
 	}
 	cc.mu.Unlock()
 
+	if newStreamHook != nil {
+		newStreamHook(cs)
+	}
+
 	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
 	if !cc.t.disableCompression() &&
 		req.Header.Get("Accept-Encoding") == "" &&
@@ -1453,15 +1594,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
 	var respHeaderTimer <-chan time.Time
 	var respHeaderRecv chan struct{}
 	if d := cc.responseHeaderTimeout(); d != 0 {
-		timer := time.NewTimer(d)
+		timer := cc.newTimer(d)
 		defer timer.Stop()
-		respHeaderTimer = timer.C
+		respHeaderTimer = timer.C()
 		respHeaderRecv = cs.respHeaderRecv
 	}
 	// Wait until the peer half-closes its end of the stream,
 	// or until the request is aborted (via context, error, or otherwise),
 	// whichever comes first.
 	for {
+		if cc.syncHooks != nil {
+			cc.syncHooks.blockUntil(func() bool {
+				select {
+				case <-cs.peerClosed:
+				case <-respHeaderTimer:
+				case <-respHeaderRecv:
+				case <-cs.abort:
+				case <-ctx.Done():
+				case <-cs.reqCancel:
+				default:
+					return false
+				}
+				return true
+			})
+		}
 		select {
 		case <-cs.peerClosed:
 			return nil
@@ -1610,7 +1766,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
 			return nil
 		}
 		cc.pendingRequests++
-		cc.cond.Wait()
+		cc.condWait()
 		cc.pendingRequests--
 		select {
 		case <-cs.abort:
@@ -1872,10 +2028,26 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
 			cs.flow.take(take)
 			return take, nil
 		}
-		cc.cond.Wait()
+		cc.condWait()
 	}
 }
 
+func validateHeaders(hdrs http.Header) string {
+	for k, vv := range hdrs {
+		if !httpguts.ValidHeaderFieldName(k) {
+			return fmt.Sprintf("name %q", k)
+		}
+		for _, v := range vv {
+			if !httpguts.ValidHeaderFieldValue(v) {
+				// Don't include the value in the error,
+				// because it may be sensitive.
+				return fmt.Sprintf("value for header %q", k)
+			}
+		}
+	}
+	return ""
+}
+
 var errNilRequestURL = errors.New("http2: Request.URI is nil")
 
 // requires cc.wmu be held.
@@ -1913,19 +2085,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
 		}
 	}
 
-	// Check for any invalid headers and return an error before we
+	// Check for any invalid headers+trailers and return an error before we
 	// potentially pollute our hpack state. (We want to be able to
 	// continue to reuse the hpack encoder for future requests)
-	for k, vv := range req.Header {
-		if !httpguts.ValidHeaderFieldName(k) {
-			return nil, fmt.Errorf("invalid HTTP header name %q", k)
-		}
-		for _, v := range vv {
-			if !httpguts.ValidHeaderFieldValue(v) {
-				// Don't include the value in the error, because it may be sensitive.
-				return nil, fmt.Errorf("invalid HTTP header value for header %q", k)
-			}
-		}
+	if err := validateHeaders(req.Header); err != "" {
+		return nil, fmt.Errorf("invalid HTTP header %s", err)
+	}
+	if err := validateHeaders(req.Trailer); err != "" {
+		return nil, fmt.Errorf("invalid HTTP trailer %s", err)
 	}
 
 	enumerateHeaders := func(f func(name, value string)) {
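encodeHeaders now routes both req.Header and req.Trailer through the shared validateHeaders helper, so malformed trailers are rejected before they can pollute the reusable HPACK encoder state. The field rules in a standalone sketch using the same httpguts validators:

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

// checkField mirrors the per-field checks in validateHeaders above.
func checkField(name, value string) error {
	if !httpguts.ValidHeaderFieldName(name) {
		return fmt.Errorf("invalid HTTP header name %q", name)
	}
	if !httpguts.ValidHeaderFieldValue(value) {
		// The value itself is withheld from the error; it may be sensitive.
		return fmt.Errorf("invalid value for header %q", name)
	}
	return nil
}

func main() {
	fmt.Println(checkField("X-Ok", "fine"))          // <nil>
	fmt.Println(checkField("Bad Header", "x"))       // invalid name: contains a space
	fmt.Println(checkField("X-Trailer", "bad\nval")) // invalid value: control character
}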
@@ -2144,7 +2311,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
 	}
 	// Wake up writeRequestBody via clientStream.awaitFlowControl and
 	// wake up RoundTrip if there is a pending request.
-	cc.cond.Broadcast()
+	cc.condBroadcast()
 
 	closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
 	if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
@@ -2232,7 +2399,7 @@ func (rl *clientConnReadLoop) cleanup() {
 			cs.abortStreamLocked(err)
 		}
 	}
-	cc.cond.Broadcast()
+	cc.condBroadcast()
 	cc.mu.Unlock()
 }
 
@@ -2267,10 +2434,9 @@ func (rl *clientConnReadLoop) run() error {
 	cc := rl.cc
 	gotSettings := false
 	readIdleTimeout := cc.t.ReadIdleTimeout
-	var t *time.Timer
+	var t timer
 	if readIdleTimeout != 0 {
-		t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
-		defer t.Stop()
+		t = cc.afterFunc(readIdleTimeout, cc.healthCheck)
 	}
 	for {
 		f, err := cc.fr.ReadFrame()
@@ -2685,7 +2851,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
 		})
 		return nil
 	}
-	if !cs.firstByte {
+	if !cs.pastHeaders {
 		cc.logf("protocol error: received DATA before a HEADERS frame")
 		rl.endStreamError(cs, StreamError{
 			StreamID: f.StreamID,
@@ -2868,7 +3034,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
 			for _, cs := range cc.streams {
 				cs.flow.add(delta)
 			}
-			cc.cond.Broadcast()
+			cc.condBroadcast()
 
 			cc.initialWindowSize = s.Val
 		case SettingHeaderTableSize:
@@ -2912,9 +3078,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
 		fl = &cs.flow
 	}
 	if !fl.add(int32(f.Increment)) {
+		// For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR
+		if cs != nil {
+			rl.endStreamError(cs, StreamError{
+				StreamID: f.StreamID,
+				Code:     ErrCodeFlowControl,
+			})
+			return nil
+		}
+
 		return ConnectionError(ErrCodeFlowControl)
 	}
-	cc.cond.Broadcast()
+	cc.condBroadcast()
 	return nil
 }
@@ -2956,24 +3131,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
 		}
 		cc.mu.Unlock()
 	}
-	errc := make(chan error, 1)
-	go func() {
+	var pingError error
+	errc := make(chan struct{})
+	cc.goRun(func() {
 		cc.wmu.Lock()
 		defer cc.wmu.Unlock()
-		if err := cc.fr.WritePing(false, p); err != nil {
-			errc <- err
+		if pingError = cc.fr.WritePing(false, p); pingError != nil {
+			close(errc)
 			return
 		}
-		if err := cc.bw.Flush(); err != nil {
-			errc <- err
+		if pingError = cc.bw.Flush(); pingError != nil {
+			close(errc)
 			return
 		}
-	}()
+	})
+	if cc.syncHooks != nil {
+		cc.syncHooks.blockUntil(func() bool {
+			select {
+			case <-c:
+			case <-errc:
+			case <-ctx.Done():
+			case <-cc.readerDone:
+			default:
+				return false
+			}
+			return true
+		})
+	}
 	select {
 	case <-c:
 		return nil
-	case err := <-errc:
-		return err
+	case <-errc:
+		return pingError
 	case <-ctx.Done():
 		return ctx.Err()
 	case <-cc.readerDone:
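Ping now reports a write failure by storing the error in a shared variable and closing a chan struct{} rather than sending on a buffered error channel. A plausible reason for the change: blockUntil's poll function must be non-blocking and idempotent, and a closed channel is observable any number of times, while a sent value would be consumed by whichever receive wins. The signaling pattern in isolation:

package main

import (
	"errors"
	"fmt"
)

func main() {
	var opError error // written by the worker before close(done)
	done := make(chan struct{})

	go func() {
		opError = errors.New("write failed")
		close(done) // closing is observable any number of times...
	}()

	<-done // ...so a poller and the final receiver can both see it,
	<-done // unlike a value sent on a channel, which is consumed once.
	fmt.Println(opError)
}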
@@ -3142,9 +3331,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err
 }
 
 func (t *Transport) idleConnTimeout() time.Duration {
+	// to keep things backwards compatible, we use non-zero values of
+	// IdleConnTimeout, followed by using the IdleConnTimeout on the underlying
+	// http1 transport, followed by 0
+	if t.IdleConnTimeout != 0 {
+		return t.IdleConnTimeout
+	}
+
 	if t.t1 != nil {
 		return t.t1.IdleConnTimeout
 	}
+
 	return 0
 }
 
@@ -3202,3 +3399,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
 		trace.GotFirstResponseByte()
 	}
 }
+
+func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
+	return trace != nil && trace.WroteHeaderField != nil
+}
+
+func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
+	if trace != nil && trace.WroteHeaderField != nil {
+		trace.WroteHeaderField(k, []string{v})
+	}
+}
+
+func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
+	if trace != nil {
+		return trace.Got1xxResponse
+	}
+	return nil
+}
+
+// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
+// connection.
+func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
+	dialer := &tls.Dialer{
+		Config: cfg,
+	}
+	cn, err := dialer.DialContext(ctx, network, addr)
+	if err != nil {
+		return nil, err
+	}
+	tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
+	return tlsCn, nil
+}