Update dependencies and build to go1.22 (#2113)
* Update dependencies and build to go1.22
* Fix API changes with respect to the updated dependencies
* Update golangci-lint config
1  vendor/golang.org/x/crypto/acme/version_go112.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build go1.12
// +build go1.12

package acme
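Most of the vendored changes below follow one pattern: the legacy `// +build` constraint comment is dropped and only the `//go:build` form (standard since Go 1.17) is kept, which is safe once the module's minimum Go version is new enough. A hedged, stand-alone sketch of the modern syntax — the file name and tags here are made up for illustration:

```go
//go:build linux && amd64 && !purego

// Package example compiles only for linux/amd64 when the purego tag is unset.
// Before Go 1.17 the same constraint was written as:
//
//	// +build linux,amd64,!purego
//
// where commas meant AND and spaces meant OR; //go:build makes the boolean
// expression explicit.
package example
```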
1  vendor/golang.org/x/crypto/argon2/blamka_amd64.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

package argon2
13  vendor/golang.org/x/crypto/argon2/blamka_amd64.s (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

#include "textflag.h"

@@ -200,8 +199,8 @@ TEXT ·mixBlocksSSE2(SB), 4, $0-32
	MOVQ out+0(FP), DX
	MOVQ a+8(FP), AX
	MOVQ b+16(FP), BX
	MOVQ a+24(FP), CX
	MOVQ $128, BP
	MOVQ c+24(FP), CX
	MOVQ $128, DI

loop:
	MOVOU 0(AX), X0
@@ -214,7 +213,7 @@ loop:
	ADDQ $16, BX
	ADDQ $16, CX
	ADDQ $16, DX
	SUBQ $2, BP
	SUBQ $2, DI
	JA loop
	RET

@@ -223,8 +222,8 @@ TEXT ·xorBlocksSSE2(SB), 4, $0-32
	MOVQ out+0(FP), DX
	MOVQ a+8(FP), AX
	MOVQ b+16(FP), BX
	MOVQ a+24(FP), CX
	MOVQ $128, BP
	MOVQ c+24(FP), CX
	MOVQ $128, DI

loop:
	MOVOU 0(AX), X0
@@ -239,6 +238,6 @@ loop:
	ADDQ $16, BX
	ADDQ $16, CX
	ADDQ $16, DX
	SUBQ $2, BP
	SUBQ $2, DI
	JA loop
	RET
1  vendor/golang.org/x/crypto/argon2/blamka_ref.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !amd64 || purego || !gc
// +build !amd64 purego !gc

package argon2
3  vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go (generated, vendored)
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.7 && amd64 && gc && !purego
// +build go1.7,amd64,gc,!purego
//go:build amd64 && gc && !purego

package blake2b
3  vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s (generated, vendored)
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.7 && amd64 && gc && !purego
// +build go1.7,amd64,gc,!purego
//go:build amd64 && gc && !purego

#include "textflag.h"
25  vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go (generated, vendored)
@@ -1,25 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.7 && amd64 && gc && !purego
// +build !go1.7,amd64,gc,!purego

package blake2b

import "golang.org/x/sys/cpu"

func init() {
	useSSE4 = cpu.X86.HasSSE41
}

//go:noescape
func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)

func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
	if useSSE4 {
		hashBlocksSSE4(h, c, flag, blocks)
	} else {
		hashBlocksGeneric(h, c, flag, blocks)
	}
}
1  vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

#include "textflag.h"
1  vendor/golang.org/x/crypto/blake2b/blake2b_ref.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !amd64 || purego || !gc
// +build !amd64 purego !gc

package blake2b
3  vendor/golang.org/x/crypto/blake2b/register.go (generated, vendored)
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.9
// +build go1.9

package blake2b

import (
3  vendor/golang.org/x/crypto/chacha20/chacha_arm64.go (generated, vendored)
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.11 && gc && !purego
// +build go1.11,gc,!purego
//go:build gc && !purego

package chacha20
3  vendor/golang.org/x/crypto/chacha20/chacha_arm64.s (generated, vendored)
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.11 && gc && !purego
// +build go1.11,gc,!purego
//go:build gc && !purego

#include "textflag.h"
3  vendor/golang.org/x/crypto/chacha20/chacha_noasm.go (generated, vendored)
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego
// +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego
//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego

package chacha20
1  vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package chacha20
111  vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s (generated, vendored)
@@ -20,7 +20,6 @@
|
||||
// due to the calling conventions and initialization of constants.
|
||||
|
||||
//go:build gc && !purego
|
||||
// +build gc,!purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
@@ -34,6 +33,9 @@
|
||||
#define CONSTBASE R16
|
||||
#define BLOCKS R17
|
||||
|
||||
// for VPERMXOR
|
||||
#define MASK R18
|
||||
|
||||
DATA consts<>+0x00(SB)/8, $0x3320646e61707865
|
||||
DATA consts<>+0x08(SB)/8, $0x6b20657479622d32
|
||||
DATA consts<>+0x10(SB)/8, $0x0000000000000001
|
||||
@@ -54,7 +56,11 @@ DATA consts<>+0x80(SB)/8, $0x6b2065746b206574
|
||||
DATA consts<>+0x88(SB)/8, $0x6b2065746b206574
|
||||
DATA consts<>+0x90(SB)/8, $0x0000000100000000
|
||||
DATA consts<>+0x98(SB)/8, $0x0000000300000002
|
||||
GLOBL consts<>(SB), RODATA, $0xa0
|
||||
DATA consts<>+0xa0(SB)/8, $0x5566774411223300
|
||||
DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88
|
||||
DATA consts<>+0xb0(SB)/8, $0x6677445522330011
|
||||
DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899
|
||||
GLOBL consts<>(SB), RODATA, $0xc0
|
||||
|
||||
//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
|
||||
TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
|
||||
@@ -71,6 +77,9 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
|
||||
MOVD $48, R10
|
||||
MOVD $64, R11
|
||||
SRD $6, LEN, BLOCKS
|
||||
// for VPERMXOR
|
||||
MOVD $consts<>+0xa0(SB), MASK
|
||||
MOVD $16, R20
|
||||
// V16
|
||||
LXVW4X (CONSTBASE)(R0), VS48
|
||||
ADD $80,CONSTBASE
|
||||
@@ -88,6 +97,10 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
|
||||
// V28
|
||||
LXVW4X (CONSTBASE)(R11), VS60
|
||||
|
||||
// Load mask constants for VPERMXOR
|
||||
LXVW4X (MASK)(R0), V20
|
||||
LXVW4X (MASK)(R20), V21
|
||||
|
||||
// splat slot from V19 -> V26
|
||||
VSPLTW $0, V19, V26
|
||||
|
||||
@@ -98,7 +111,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
|
||||
|
||||
MOVD $10, R14
|
||||
MOVD R14, CTR
|
||||
|
||||
PCALIGN $16
|
||||
loop_outer_vsx:
|
||||
// V0, V1, V2, V3
|
||||
LXVW4X (R0)(CONSTBASE), VS32
|
||||
@@ -129,22 +142,17 @@ loop_outer_vsx:
|
||||
VSPLTISW $12, V28
|
||||
VSPLTISW $8, V29
|
||||
VSPLTISW $7, V30
|
||||
|
||||
PCALIGN $16
|
||||
loop_vsx:
|
||||
VADDUWM V0, V4, V0
|
||||
VADDUWM V1, V5, V1
|
||||
VADDUWM V2, V6, V2
|
||||
VADDUWM V3, V7, V3
|
||||
|
||||
VXOR V12, V0, V12
|
||||
VXOR V13, V1, V13
|
||||
VXOR V14, V2, V14
|
||||
VXOR V15, V3, V15
|
||||
|
||||
VRLW V12, V27, V12
|
||||
VRLW V13, V27, V13
|
||||
VRLW V14, V27, V14
|
||||
VRLW V15, V27, V15
|
||||
VPERMXOR V12, V0, V21, V12
|
||||
VPERMXOR V13, V1, V21, V13
|
||||
VPERMXOR V14, V2, V21, V14
|
||||
VPERMXOR V15, V3, V21, V15
|
||||
|
||||
VADDUWM V8, V12, V8
|
||||
VADDUWM V9, V13, V9
|
||||
@@ -166,15 +174,10 @@ loop_vsx:
|
||||
VADDUWM V2, V6, V2
|
||||
VADDUWM V3, V7, V3
|
||||
|
||||
VXOR V12, V0, V12
|
||||
VXOR V13, V1, V13
|
||||
VXOR V14, V2, V14
|
||||
VXOR V15, V3, V15
|
||||
|
||||
VRLW V12, V29, V12
|
||||
VRLW V13, V29, V13
|
||||
VRLW V14, V29, V14
|
||||
VRLW V15, V29, V15
|
||||
VPERMXOR V12, V0, V20, V12
|
||||
VPERMXOR V13, V1, V20, V13
|
||||
VPERMXOR V14, V2, V20, V14
|
||||
VPERMXOR V15, V3, V20, V15
|
||||
|
||||
VADDUWM V8, V12, V8
|
||||
VADDUWM V9, V13, V9
|
||||
@@ -196,15 +199,10 @@ loop_vsx:
|
||||
VADDUWM V2, V7, V2
|
||||
VADDUWM V3, V4, V3
|
||||
|
||||
VXOR V15, V0, V15
|
||||
VXOR V12, V1, V12
|
||||
VXOR V13, V2, V13
|
||||
VXOR V14, V3, V14
|
||||
|
||||
VRLW V15, V27, V15
|
||||
VRLW V12, V27, V12
|
||||
VRLW V13, V27, V13
|
||||
VRLW V14, V27, V14
|
||||
VPERMXOR V15, V0, V21, V15
|
||||
VPERMXOR V12, V1, V21, V12
|
||||
VPERMXOR V13, V2, V21, V13
|
||||
VPERMXOR V14, V3, V21, V14
|
||||
|
||||
VADDUWM V10, V15, V10
|
||||
VADDUWM V11, V12, V11
|
||||
@@ -226,15 +224,10 @@ loop_vsx:
|
||||
VADDUWM V2, V7, V2
|
||||
VADDUWM V3, V4, V3
|
||||
|
||||
VXOR V15, V0, V15
|
||||
VXOR V12, V1, V12
|
||||
VXOR V13, V2, V13
|
||||
VXOR V14, V3, V14
|
||||
|
||||
VRLW V15, V29, V15
|
||||
VRLW V12, V29, V12
|
||||
VRLW V13, V29, V13
|
||||
VRLW V14, V29, V14
|
||||
VPERMXOR V15, V0, V20, V15
|
||||
VPERMXOR V12, V1, V20, V12
|
||||
VPERMXOR V13, V2, V20, V13
|
||||
VPERMXOR V14, V3, V20, V14
|
||||
|
||||
VADDUWM V10, V15, V10
|
||||
VADDUWM V11, V12, V11
|
||||
@@ -250,48 +243,48 @@ loop_vsx:
|
||||
VRLW V6, V30, V6
|
||||
VRLW V7, V30, V7
|
||||
VRLW V4, V30, V4
|
||||
BC 16, LT, loop_vsx
|
||||
BDNZ loop_vsx
|
||||
|
||||
VADDUWM V12, V26, V12
|
||||
|
||||
WORD $0x13600F8C // VMRGEW V0, V1, V27
|
||||
WORD $0x13821F8C // VMRGEW V2, V3, V28
|
||||
VMRGEW V0, V1, V27
|
||||
VMRGEW V2, V3, V28
|
||||
|
||||
WORD $0x10000E8C // VMRGOW V0, V1, V0
|
||||
WORD $0x10421E8C // VMRGOW V2, V3, V2
|
||||
VMRGOW V0, V1, V0
|
||||
VMRGOW V2, V3, V2
|
||||
|
||||
WORD $0x13A42F8C // VMRGEW V4, V5, V29
|
||||
WORD $0x13C63F8C // VMRGEW V6, V7, V30
|
||||
VMRGEW V4, V5, V29
|
||||
VMRGEW V6, V7, V30
|
||||
|
||||
XXPERMDI VS32, VS34, $0, VS33
|
||||
XXPERMDI VS32, VS34, $3, VS35
|
||||
XXPERMDI VS59, VS60, $0, VS32
|
||||
XXPERMDI VS59, VS60, $3, VS34
|
||||
|
||||
WORD $0x10842E8C // VMRGOW V4, V5, V4
|
||||
WORD $0x10C63E8C // VMRGOW V6, V7, V6
|
||||
VMRGOW V4, V5, V4
|
||||
VMRGOW V6, V7, V6
|
||||
|
||||
WORD $0x13684F8C // VMRGEW V8, V9, V27
|
||||
WORD $0x138A5F8C // VMRGEW V10, V11, V28
|
||||
VMRGEW V8, V9, V27
|
||||
VMRGEW V10, V11, V28
|
||||
|
||||
XXPERMDI VS36, VS38, $0, VS37
|
||||
XXPERMDI VS36, VS38, $3, VS39
|
||||
XXPERMDI VS61, VS62, $0, VS36
|
||||
XXPERMDI VS61, VS62, $3, VS38
|
||||
|
||||
WORD $0x11084E8C // VMRGOW V8, V9, V8
|
||||
WORD $0x114A5E8C // VMRGOW V10, V11, V10
|
||||
VMRGOW V8, V9, V8
|
||||
VMRGOW V10, V11, V10
|
||||
|
||||
WORD $0x13AC6F8C // VMRGEW V12, V13, V29
|
||||
WORD $0x13CE7F8C // VMRGEW V14, V15, V30
|
||||
VMRGEW V12, V13, V29
|
||||
VMRGEW V14, V15, V30
|
||||
|
||||
XXPERMDI VS40, VS42, $0, VS41
|
||||
XXPERMDI VS40, VS42, $3, VS43
|
||||
XXPERMDI VS59, VS60, $0, VS40
|
||||
XXPERMDI VS59, VS60, $3, VS42
|
||||
|
||||
WORD $0x118C6E8C // VMRGOW V12, V13, V12
|
||||
WORD $0x11CE7E8C // VMRGOW V14, V15, V14
|
||||
VMRGOW V12, V13, V12
|
||||
VMRGOW V14, V15, V14
|
||||
|
||||
VSPLTISW $4, V27
|
||||
VADDUWM V26, V27, V26
|
||||
@@ -432,7 +425,7 @@ tail_vsx:
|
||||
ADD $-1, R11, R12
|
||||
ADD $-1, INP
|
||||
ADD $-1, OUT
|
||||
|
||||
PCALIGN $16
|
||||
looptail_vsx:
|
||||
// Copying the result to OUT
|
||||
// in bytes.
|
||||
@@ -440,7 +433,7 @@ looptail_vsx:
|
||||
MOVBZU 1(INP), TMP
|
||||
XOR KEY, TMP, KEY
|
||||
MOVBU KEY, 1(OUT)
|
||||
BC 16, LT, looptail_vsx
|
||||
BDNZ looptail_vsx
|
||||
|
||||
// Clear the stack values
|
||||
STXVW4X VS48, (R11)(R0)
|
||||
|
||||
1  vendor/golang.org/x/crypto/chacha20/chacha_s390x.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package chacha20
1  vendor/golang.org/x/crypto/chacha20/chacha_s390x.s (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

#include "go_asm.h"
#include "textflag.h"
1  vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go (generated, vendored)
@@ -1,7 +1,6 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

package field
1  vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s (generated, vendored)
@@ -1,7 +1,6 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

#include "textflag.h"
1  vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !amd64 || !gc || purego
// +build !amd64 !gc purego

package field
1  vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build arm64 && gc && !purego
// +build arm64,gc,!purego

package field
1  vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build arm64 && gc && !purego
// +build arm64,gc,!purego

#include "textflag.h"
1  vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !arm64 || !gc || purego
// +build !arm64 !gc purego

package field
71  vendor/golang.org/x/crypto/ed25519/ed25519.go (generated, vendored)
@@ -1,71 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ed25519 implements the Ed25519 signature algorithm. See
|
||||
// https://ed25519.cr.yp.to/.
|
||||
//
|
||||
// These functions are also compatible with the “Ed25519” function defined in
|
||||
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
|
||||
// representation includes a public key suffix to make multiple signing
|
||||
// operations with the same key more efficient. This package refers to the RFC
|
||||
// 8032 private key as the “seed”.
|
||||
//
|
||||
// Beginning with Go 1.13, the functionality of this package was moved to the
|
||||
// standard library as crypto/ed25519. This package only acts as a compatibility
|
||||
// wrapper.
|
||||
package ed25519
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
// PublicKeySize is the size, in bytes, of public keys as used in this package.
|
||||
PublicKeySize = 32
|
||||
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
|
||||
PrivateKeySize = 64
|
||||
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
|
||||
SignatureSize = 64
|
||||
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
|
||||
SeedSize = 32
|
||||
)
|
||||
|
||||
// PublicKey is the type of Ed25519 public keys.
|
||||
//
|
||||
// This type is an alias for crypto/ed25519's PublicKey type.
|
||||
// See the crypto/ed25519 package for the methods on this type.
|
||||
type PublicKey = ed25519.PublicKey
|
||||
|
||||
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
|
||||
//
|
||||
// This type is an alias for crypto/ed25519's PrivateKey type.
|
||||
// See the crypto/ed25519 package for the methods on this type.
|
||||
type PrivateKey = ed25519.PrivateKey
|
||||
|
||||
// GenerateKey generates a public/private key pair using entropy from rand.
|
||||
// If rand is nil, crypto/rand.Reader will be used.
|
||||
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
|
||||
return ed25519.GenerateKey(rand)
|
||||
}
|
||||
|
||||
// NewKeyFromSeed calculates a private key from a seed. It will panic if
|
||||
// len(seed) is not SeedSize. This function is provided for interoperability
|
||||
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
|
||||
// package.
|
||||
func NewKeyFromSeed(seed []byte) PrivateKey {
|
||||
return ed25519.NewKeyFromSeed(seed)
|
||||
}
|
||||
|
||||
// Sign signs the message with privateKey and returns a signature. It will
|
||||
// panic if len(privateKey) is not PrivateKeySize.
|
||||
func Sign(privateKey PrivateKey, message []byte) []byte {
|
||||
return ed25519.Sign(privateKey, message)
|
||||
}
|
||||
|
||||
// Verify reports whether sig is a valid signature of message by publicKey. It
|
||||
// will panic if len(publicKey) is not PublicKeySize.
|
||||
func Verify(publicKey PublicKey, message, sig []byte) bool {
|
||||
return ed25519.Verify(publicKey, message, sig)
|
||||
}
|
||||
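The deleted vendor file above was the golang.org/x/crypto/ed25519 compatibility wrapper; since Go 1.13 the same API lives in the standard library, so callers can use crypto/ed25519 directly. A short sketch:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("hello")
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}
```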
4  vendor/golang.org/x/crypto/hkdf/hkdf.go (generated, vendored)
@@ -56,7 +56,9 @@ func (f *hkdf) Read(p []byte) (int, error) {

	// Fill the rest of the buffer
	for len(p) > 0 {
		f.expander.Reset()
		if f.counter > 1 {
			f.expander.Reset()
		}
		f.expander.Write(f.prev)
		f.expander.Write(f.info)
		f.expander.Write([]byte{f.counter})
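The hkdf change above only skips the redundant Reset on the first expand block; the package's public API is unchanged. A minimal, hedged sketch of deriving a key with golang.org/x/crypto/hkdf (the salt and info strings are illustrative, not from this repo):

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := make([]byte, 32) // e.g. a shared secret from a key exchange
	if _, err := rand.Read(secret); err != nil {
		panic(err)
	}
	// hkdf.New returns an io.Reader; each Read yields more output key material.
	kdf := hkdf.New(sha256.New, secret, []byte("illustrative-salt"), []byte("illustrative-context"))
	key := make([]byte, 32)
	if _, err := io.ReadFull(kdf, key); err != nil {
		panic(err)
	}
	fmt.Printf("derived key: %x\n", key)
}
```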
1  vendor/golang.org/x/crypto/internal/alias/alias.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !purego
// +build !purego

// Package alias implements memory aliasing tests.
package alias
1  vendor/golang.org/x/crypto/internal/alias/alias_purego.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build purego
// +build purego

// Package alias implements memory aliasing tests.
package alias
40  vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go (generated, vendored)
@@ -1,40 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.13
|
||||
// +build !go1.13
|
||||
|
||||
package poly1305
|
||||
|
||||
// Generic fallbacks for the math/bits intrinsics, copied from
|
||||
// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had
|
||||
// variable time fallbacks until Go 1.13.
|
||||
|
||||
func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
|
||||
sum = x + y + carry
|
||||
carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
|
||||
return
|
||||
}
|
||||
|
||||
func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
|
||||
diff = x - y - borrow
|
||||
borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
|
||||
return
|
||||
}
|
||||
|
||||
func bitsMul64(x, y uint64) (hi, lo uint64) {
|
||||
const mask32 = 1<<32 - 1
|
||||
x0 := x & mask32
|
||||
x1 := x >> 32
|
||||
y0 := y & mask32
|
||||
y1 := y >> 32
|
||||
w0 := x0 * y0
|
||||
t := x1*y0 + w0>>32
|
||||
w1 := t & mask32
|
||||
w2 := t >> 32
|
||||
w1 += x0 * y1
|
||||
hi = x1*y1 + w2 + w1>>32
|
||||
lo = x * y
|
||||
return
|
||||
}
|
||||
22  vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go (generated, vendored)
@@ -1,22 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.13
// +build go1.13

package poly1305

import "math/bits"

func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
	return bits.Add64(x, y, carry)
}

func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
	return bits.Sub64(x, y, borrow)
}

func bitsMul64(x, y uint64) (hi, lo uint64) {
	return bits.Mul64(x, y)
}
1  vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
// +build !amd64,!ppc64le,!s390x !gc purego

package poly1305
1  vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package poly1305
1  vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

#include "textflag.h"
43  vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go (generated, vendored)
@@ -7,7 +7,10 @@
|
||||
|
||||
package poly1305
|
||||
|
||||
import "encoding/binary"
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
|
||||
// for a 64 bytes message is approximately
|
||||
@@ -114,13 +117,13 @@ type uint128 struct {
|
||||
}
|
||||
|
||||
func mul64(a, b uint64) uint128 {
|
||||
hi, lo := bitsMul64(a, b)
|
||||
hi, lo := bits.Mul64(a, b)
|
||||
return uint128{lo, hi}
|
||||
}
|
||||
|
||||
func add128(a, b uint128) uint128 {
|
||||
lo, c := bitsAdd64(a.lo, b.lo, 0)
|
||||
hi, c := bitsAdd64(a.hi, b.hi, c)
|
||||
lo, c := bits.Add64(a.lo, b.lo, 0)
|
||||
hi, c := bits.Add64(a.hi, b.hi, c)
|
||||
if c != 0 {
|
||||
panic("poly1305: unexpected overflow")
|
||||
}
|
||||
@@ -155,8 +158,8 @@ func updateGeneric(state *macState, msg []byte) {
|
||||
// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
|
||||
// add 1 to the most significant (2¹²⁸) limb, h2.
|
||||
if len(msg) >= TagSize {
|
||||
h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
|
||||
h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
|
||||
h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
|
||||
h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
|
||||
h2 += c + 1
|
||||
|
||||
msg = msg[TagSize:]
|
||||
@@ -165,8 +168,8 @@ func updateGeneric(state *macState, msg []byte) {
|
||||
copy(buf[:], msg)
|
||||
buf[len(msg)] = 1
|
||||
|
||||
h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
|
||||
h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
|
||||
h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
|
||||
h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
|
||||
h2 += c
|
||||
|
||||
msg = nil
|
||||
@@ -219,9 +222,9 @@ func updateGeneric(state *macState, msg []byte) {
|
||||
m3 := h2r1
|
||||
|
||||
t0 := m0.lo
|
||||
t1, c := bitsAdd64(m1.lo, m0.hi, 0)
|
||||
t2, c := bitsAdd64(m2.lo, m1.hi, c)
|
||||
t3, _ := bitsAdd64(m3.lo, m2.hi, c)
|
||||
t1, c := bits.Add64(m1.lo, m0.hi, 0)
|
||||
t2, c := bits.Add64(m2.lo, m1.hi, c)
|
||||
t3, _ := bits.Add64(m3.lo, m2.hi, c)
|
||||
|
||||
// Now we have the result as 4 64-bit limbs, and we need to reduce it
|
||||
// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
|
||||
@@ -243,14 +246,14 @@ func updateGeneric(state *macState, msg []byte) {
|
||||
|
||||
// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
|
||||
|
||||
h0, c = bitsAdd64(h0, cc.lo, 0)
|
||||
h1, c = bitsAdd64(h1, cc.hi, c)
|
||||
h0, c = bits.Add64(h0, cc.lo, 0)
|
||||
h1, c = bits.Add64(h1, cc.hi, c)
|
||||
h2 += c
|
||||
|
||||
cc = shiftRightBy2(cc)
|
||||
|
||||
h0, c = bitsAdd64(h0, cc.lo, 0)
|
||||
h1, c = bitsAdd64(h1, cc.hi, c)
|
||||
h0, c = bits.Add64(h0, cc.lo, 0)
|
||||
h1, c = bits.Add64(h1, cc.hi, c)
|
||||
h2 += c
|
||||
|
||||
// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
|
||||
@@ -287,9 +290,9 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
|
||||
// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
|
||||
// result if the subtraction underflows, and t otherwise.
|
||||
|
||||
hMinusP0, b := bitsSub64(h0, p0, 0)
|
||||
hMinusP1, b := bitsSub64(h1, p1, b)
|
||||
_, b = bitsSub64(h2, p2, b)
|
||||
hMinusP0, b := bits.Sub64(h0, p0, 0)
|
||||
hMinusP1, b := bits.Sub64(h1, p1, b)
|
||||
_, b = bits.Sub64(h2, p2, b)
|
||||
|
||||
// h = h if h < p else h - p
|
||||
h0 = select64(b, h0, hMinusP0)
|
||||
@@ -301,8 +304,8 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
|
||||
//
|
||||
// by just doing a wide addition with the 128 low bits of h and discarding
|
||||
// the overflow.
|
||||
h0, c := bitsAdd64(h0, s[0], 0)
|
||||
h1, _ = bitsAdd64(h1, s[1], c)
|
||||
h0, c := bits.Add64(h0, s[0], 0)
|
||||
h1, _ = bits.Add64(h1, s[1], c)
|
||||
|
||||
binary.LittleEndian.PutUint64(out[0:8], h0)
|
||||
binary.LittleEndian.PutUint64(out[8:16], h1)
|
||||
|
||||
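The sum_generic.go hunks above replace the package-local bitsAdd64/bitsSub64/bitsMul64 shims (and the deleted bits_compat.go / bits_go1.13.go files) with the math/bits intrinsics directly, which is possible once the minimum Go version is past 1.13. A small, self-contained sketch of the same pattern — 128-bit accumulation with carry propagation — written here for illustration rather than taken from the vendored code:

```go
package main

import (
	"fmt"
	"math/bits"
)

// uint128 mirrors the little helper struct used in sum_generic.go.
type uint128 struct{ lo, hi uint64 }

// mul64 multiplies two 64-bit limbs into a 128-bit result.
func mul64(a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	return uint128{lo, hi}
}

// add128 adds two 128-bit values, panicking on overflow like the vendored code.
func add128(a, b uint128) uint128 {
	lo, c := bits.Add64(a.lo, b.lo, 0)
	hi, c := bits.Add64(a.hi, b.hi, c)
	if c != 0 {
		panic("unexpected overflow")
	}
	return uint128{lo, hi}
}

func main() {
	x := mul64(1<<40, 1<<30)
	y := mul64(3, 5)
	fmt.Println(add128(x, y))
}
```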
1  vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package poly1305
15  vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s (generated, vendored)
@@ -3,7 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego
|
||||
// +build gc,!purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
@@ -20,15 +19,14 @@
|
||||
|
||||
#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \
|
||||
MULLD r0, h0, t0; \
|
||||
MULLD r0, h1, t4; \
|
||||
MULHDU r0, h0, t1; \
|
||||
MULLD r0, h1, t4; \
|
||||
MULHDU r0, h1, t5; \
|
||||
ADDC t4, t1, t1; \
|
||||
MULLD r0, h2, t2; \
|
||||
ADDZE t5; \
|
||||
MULHDU r1, h0, t4; \
|
||||
MULLD r1, h0, h0; \
|
||||
ADD t5, t2, t2; \
|
||||
ADDE t5, t2, t2; \
|
||||
ADDC h0, t1, t1; \
|
||||
MULLD h2, r1, t3; \
|
||||
ADDZE t4, h0; \
|
||||
@@ -38,13 +36,11 @@
|
||||
ADDE t5, t3, t3; \
|
||||
ADDC h0, t2, t2; \
|
||||
MOVD $-4, t4; \
|
||||
MOVD t0, h0; \
|
||||
MOVD t1, h1; \
|
||||
ADDZE t3; \
|
||||
ANDCC $3, t2, h2; \
|
||||
AND t2, t4, t0; \
|
||||
RLDICL $0, t2, $62, h2; \
|
||||
AND t2, t4, h0; \
|
||||
ADDC t0, h0, h0; \
|
||||
ADDE t3, h1, h1; \
|
||||
ADDE t3, t1, h1; \
|
||||
SLD $62, t3, t4; \
|
||||
SRD $2, t2; \
|
||||
ADDZE h2; \
|
||||
@@ -76,6 +72,7 @@ TEXT ·update(SB), $0-32
|
||||
loop:
|
||||
POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22)
|
||||
|
||||
PCALIGN $16
|
||||
multiply:
|
||||
POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21)
|
||||
ADD $-16, R5
|
||||
|
||||
1  vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package poly1305
1  vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

#include "textflag.h"
1  vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build amd64 && !purego && gc
// +build amd64,!purego,gc

package salsa
1  vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build amd64 && !purego && gc
// +build amd64,!purego,gc

// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
1  vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go (generated, vendored)
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !amd64 || purego || !gc
// +build !amd64 purego !gc

package salsa
38  vendor/golang.org/x/crypto/ssh/certs.go (generated, vendored)
@@ -16,8 +16,9 @@ import (
// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear
// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms.
// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't
// appear in the Signature.Format field.
// Unlike key algorithm names, these are not passed to AlgorithmSigner nor
// returned by MultiAlgorithmSigner and don't appear in the Signature.Format
// field.
const (
	CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
	CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
@@ -255,10 +256,17 @@ func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
		return nil, errors.New("ssh: signer and cert have different public key")
	}

	if algorithmSigner, ok := signer.(AlgorithmSigner); ok {
	switch s := signer.(type) {
	case MultiAlgorithmSigner:
		return &multiAlgorithmSigner{
			AlgorithmSigner: &algorithmOpenSSHCertSigner{
				&openSSHCertSigner{cert, signer}, s},
			supportedAlgorithms: s.Algorithms(),
		}, nil
	case AlgorithmSigner:
		return &algorithmOpenSSHCertSigner{
			&openSSHCertSigner{cert, signer}, algorithmSigner}, nil
	} else {
			&openSSHCertSigner{cert, signer}, s}, nil
	default:
		return &openSSHCertSigner{cert, signer}, nil
	}
}
@@ -432,7 +440,9 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
}

// SignCert signs the certificate with an authority, setting the Nonce,
// SignatureKey, and Signature fields.
// SignatureKey, and Signature fields. If the authority implements the
// MultiAlgorithmSigner interface the first algorithm in the list is used. This
// is useful if you want to sign with a specific algorithm.
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
	c.Nonce = make([]byte, 32)
	if _, err := io.ReadFull(rand, c.Nonce); err != nil {
@@ -440,8 +450,20 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
	}
	c.SignatureKey = authority.PublicKey()

	// Default to KeyAlgoRSASHA512 for ssh-rsa signers.
	if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA {
	if v, ok := authority.(MultiAlgorithmSigner); ok {
		if len(v.Algorithms()) == 0 {
			return errors.New("the provided authority has no signature algorithm")
		}
		// Use the first algorithm in the list.
		sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), v.Algorithms()[0])
		if err != nil {
			return err
		}
		c.Signature = sig
		return nil
	} else if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA {
		// Default to KeyAlgoRSASHA512 for ssh-rsa signers.
		// TODO: consider using KeyAlgoRSASHA256 as default.
		sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512)
		if err != nil {
			return err
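As the certs.go hunk shows, SignCert now prefers a MultiAlgorithmSigner and signs with the first algorithm the signer advertises. A hedged sketch of pinning the signature algorithm for an RSA certificate authority via ssh.NewSignerWithAlgorithms — the key size and the type assertion on the returned signer are assumptions for illustration:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	caKey, err := rsa.GenerateKey(rand.Reader, 3072)
	if err != nil {
		log.Fatal(err)
	}
	caSigner, err := ssh.NewSignerFromKey(caKey)
	if err != nil {
		log.Fatal(err)
	}
	// Restrict the CA to rsa-sha2-256. Passing this signer as the authority to
	// (*ssh.Certificate).SignCert makes it sign with the first (here: only)
	// algorithm in the list, per the SignCert change above.
	caMulti, err := ssh.NewSignerWithAlgorithms(caSigner.(ssh.AlgorithmSigner), []string{ssh.KeyAlgoRSASHA256})
	if err != nil {
		log.Fatal(err)
	}
	_ = caMulti
}
```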
28  vendor/golang.org/x/crypto/ssh/channel.go (generated, vendored)
@@ -187,9 +187,11 @@ type channel struct {
	pending *buffer
	extPending *buffer

	// windowMu protects myWindow, the flow-control window.
	windowMu sync.Mutex
	myWindow uint32
	// windowMu protects myWindow, the flow-control window, and myConsumed,
	// the number of bytes consumed since we last increased myWindow
	windowMu sync.Mutex
	myWindow uint32
	myConsumed uint32

	// writeMu serializes calls to mux.conn.writePacket() and
	// protects sentClose and packetPool. This mutex must be
@@ -332,14 +334,24 @@ func (ch *channel) handleData(packet []byte) error {
	return nil
}

func (c *channel) adjustWindow(n uint32) error {
func (c *channel) adjustWindow(adj uint32) error {
	c.windowMu.Lock()
	// Since myWindow is managed on our side, and can never exceed
	// the initial window setting, we don't worry about overflow.
	c.myWindow += uint32(n)
	// Since myConsumed and myWindow are managed on our side, and can never
	// exceed the initial window setting, we don't worry about overflow.
	c.myConsumed += adj
	var sendAdj uint32
	if (channelWindowSize-c.myWindow > 3*c.maxIncomingPayload) ||
		(c.myWindow < channelWindowSize/2) {
		sendAdj = c.myConsumed
		c.myConsumed = 0
		c.myWindow += sendAdj
	}
	c.windowMu.Unlock()
	if sendAdj == 0 {
		return nil
	}
	return c.sendMessage(windowAdjustMsg{
		AdditionalBytes: uint32(n),
		AdditionalBytes: sendAdj,
	})
}
2  vendor/golang.org/x/crypto/ssh/client.go (generated, vendored)
@@ -82,7 +82,7 @@ func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan

	if err := conn.clientHandshake(addr, &fullConf); err != nil {
		c.Close()
		return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err)
		return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %w", err)
	}
	conn.mux = newMux(conn.transport)
	return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil
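The client.go change switches the handshake error from %v to %w, so the underlying error stays in the wrap chain and callers can inspect it. A small stand-alone sketch (using io.EOF only as a stand-in error) of what that difference means:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("ssh: handshake failed: %w", io.EOF)
	flattened := fmt.Errorf("ssh: handshake failed: %v", io.EOF)

	fmt.Println(errors.Is(wrapped, io.EOF))   // true: %w keeps the error chain
	fmt.Println(errors.Is(flattened, io.EOF)) // false: %v only formats the text
}
```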
130  vendor/golang.org/x/crypto/ssh/client_auth.go (generated, vendored)
@@ -71,7 +71,9 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
|
||||
for auth := AuthMethod(new(noneAuth)); auth != nil; {
|
||||
ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions)
|
||||
if err != nil {
|
||||
return err
|
||||
// We return the error later if there is no other method left to
|
||||
// try.
|
||||
ok = authFailure
|
||||
}
|
||||
if ok == authSuccess {
|
||||
// success
|
||||
@@ -101,6 +103,12 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if auth == nil && err != nil {
|
||||
// We have an error and there are no other authentication methods to
|
||||
// try, so we return it.
|
||||
return err
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried)
|
||||
}
|
||||
@@ -217,21 +225,45 @@ func (cb publicKeyCallback) method() string {
|
||||
return "publickey"
|
||||
}
|
||||
|
||||
func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) {
|
||||
func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiAlgorithmSigner, string, error) {
|
||||
var as MultiAlgorithmSigner
|
||||
keyFormat := signer.PublicKey().Type()
|
||||
|
||||
// Like in sendKexInit, if the public key implements AlgorithmSigner we
|
||||
// assume it supports all algorithms, otherwise only the key format one.
|
||||
as, ok := signer.(AlgorithmSigner)
|
||||
if !ok {
|
||||
return algorithmSignerWrapper{signer}, keyFormat
|
||||
// If the signer implements MultiAlgorithmSigner we use the algorithms it
|
||||
// support, if it implements AlgorithmSigner we assume it supports all
|
||||
// algorithms, otherwise only the key format one.
|
||||
switch s := signer.(type) {
|
||||
case MultiAlgorithmSigner:
|
||||
as = s
|
||||
case AlgorithmSigner:
|
||||
as = &multiAlgorithmSigner{
|
||||
AlgorithmSigner: s,
|
||||
supportedAlgorithms: algorithmsForKeyFormat(underlyingAlgo(keyFormat)),
|
||||
}
|
||||
default:
|
||||
as = &multiAlgorithmSigner{
|
||||
AlgorithmSigner: algorithmSignerWrapper{signer},
|
||||
supportedAlgorithms: []string{underlyingAlgo(keyFormat)},
|
||||
}
|
||||
}
|
||||
|
||||
getFallbackAlgo := func() (string, error) {
|
||||
// Fallback to use if there is no "server-sig-algs" extension or a
|
||||
// common algorithm cannot be found. We use the public key format if the
|
||||
// MultiAlgorithmSigner supports it, otherwise we return an error.
|
||||
if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
|
||||
return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v",
|
||||
underlyingAlgo(keyFormat), keyFormat, as.Algorithms())
|
||||
}
|
||||
return keyFormat, nil
|
||||
}
|
||||
|
||||
extPayload, ok := extensions["server-sig-algs"]
|
||||
if !ok {
|
||||
// If there is no "server-sig-algs" extension, fall back to the key
|
||||
// format algorithm.
|
||||
return as, keyFormat
|
||||
// If there is no "server-sig-algs" extension use the fallback
|
||||
// algorithm.
|
||||
algo, err := getFallbackAlgo()
|
||||
return as, algo, err
|
||||
}
|
||||
|
||||
// The server-sig-algs extension only carries underlying signature
|
||||
@@ -245,15 +277,22 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as Alg
|
||||
}
|
||||
}
|
||||
|
||||
keyAlgos := algorithmsForKeyFormat(keyFormat)
|
||||
// Filter algorithms based on those supported by MultiAlgorithmSigner.
|
||||
var keyAlgos []string
|
||||
for _, algo := range algorithmsForKeyFormat(keyFormat) {
|
||||
if contains(as.Algorithms(), underlyingAlgo(algo)) {
|
||||
keyAlgos = append(keyAlgos, algo)
|
||||
}
|
||||
}
|
||||
|
||||
algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos)
|
||||
if err != nil {
|
||||
// If there is no overlap, try the key anyway with the key format
|
||||
// algorithm, to support servers that fail to list all supported
|
||||
// algorithms.
|
||||
return as, keyFormat
|
||||
// If there is no overlap, return the fallback algorithm to support
|
||||
// servers that fail to list all supported algorithms.
|
||||
algo, err := getFallbackAlgo()
|
||||
return as, algo, err
|
||||
}
|
||||
return as, algo
|
||||
return as, algo, nil
|
||||
}
|
||||
|
||||
func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) {
|
||||
@@ -267,14 +306,39 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
|
||||
return authFailure, nil, err
|
||||
}
|
||||
var methods []string
|
||||
for _, signer := range signers {
|
||||
pub := signer.PublicKey()
|
||||
as, algo := pickSignatureAlgorithm(signer, extensions)
|
||||
var errSigAlgo error
|
||||
|
||||
origSignersLen := len(signers)
|
||||
for idx := 0; idx < len(signers); idx++ {
|
||||
signer := signers[idx]
|
||||
pub := signer.PublicKey()
|
||||
as, algo, err := pickSignatureAlgorithm(signer, extensions)
|
||||
if err != nil && errSigAlgo == nil {
|
||||
// If we cannot negotiate a signature algorithm store the first
|
||||
// error so we can return it to provide a more meaningful message if
|
||||
// no other signers work.
|
||||
errSigAlgo = err
|
||||
continue
|
||||
}
|
||||
ok, err := validateKey(pub, algo, user, c)
|
||||
if err != nil {
|
||||
return authFailure, nil, err
|
||||
}
|
||||
// OpenSSH 7.2-7.7 advertises support for rsa-sha2-256 and rsa-sha2-512
|
||||
// in the "server-sig-algs" extension but doesn't support these
|
||||
// algorithms for certificate authentication, so if the server rejects
|
||||
// the key try to use the obtained algorithm as if "server-sig-algs" had
|
||||
// not been implemented if supported from the algorithm signer.
|
||||
if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 {
|
||||
if contains(as.Algorithms(), KeyAlgoRSA) {
|
||||
// We retry using the compat algorithm after all signers have
|
||||
// been tried normally.
|
||||
signers = append(signers, &multiAlgorithmSigner{
|
||||
AlgorithmSigner: as,
|
||||
supportedAlgorithms: []string{KeyAlgoRSA},
|
||||
})
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
@@ -317,22 +381,12 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
|
||||
// contain the "publickey" method, do not attempt to authenticate with any
|
||||
// other keys. According to RFC 4252 Section 7, the latter can occur when
|
||||
// additional authentication methods are required.
|
||||
if success == authSuccess || !containsMethod(methods, cb.method()) {
|
||||
if success == authSuccess || !contains(methods, cb.method()) {
|
||||
return success, methods, err
|
||||
}
|
||||
}
|
||||
|
||||
return authFailure, methods, nil
|
||||
}
|
||||
|
||||
func containsMethod(methods []string, method string) bool {
|
||||
for _, m := range methods {
|
||||
if m == method {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
return authFailure, methods, errSigAlgo
|
||||
}
|
||||
|
||||
// validateKey validates the key provided is acceptable to the server.
|
||||
@@ -350,10 +404,10 @@ func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, e
|
||||
return false, err
|
||||
}
|
||||
|
||||
return confirmKeyAck(key, algo, c)
|
||||
return confirmKeyAck(key, c)
|
||||
}
|
||||
|
||||
func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) {
|
||||
func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
|
||||
pubKey := key.Marshal()
|
||||
|
||||
for {
|
||||
@@ -371,7 +425,15 @@ func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) {
|
||||
if err := Unmarshal(packet, &msg); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) {
|
||||
// According to RFC 4252 Section 7 the algorithm in
|
||||
// SSH_MSG_USERAUTH_PK_OK should match that of the request but some
|
||||
// servers send the key type instead. OpenSSH allows any algorithm
|
||||
// that matches the public key, so we do the same.
|
||||
// https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709
|
||||
if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
|
||||
return false, nil
|
||||
}
|
||||
if !bytes.Equal(msg.PubKey, pubKey) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
||||
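The client_auth.go changes make pickSignatureAlgorithm return an error and route signature-algorithm selection through MultiAlgorithmSigner, with a retry path for OpenSSH 7.2-7.7 certificate quirks. From the caller's side, public key authentication is still configured the same way; a minimal, hedged sketch (the host, user and key path are placeholders):

```go
package main

import (
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	keyBytes, err := os.ReadFile("/home/user/.ssh/id_ed25519") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	config := &ssh.ClientConfig{
		User:            "user",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // placeholder; verify host keys in real use
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```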
11  vendor/golang.org/x/crypto/ssh/common.go (generated, vendored)
@@ -10,7 +10,6 @@ import (
	"fmt"
	"io"
	"math"
	"strings"
	"sync"

	_ "crypto/sha1"
@@ -128,6 +127,14 @@ func isRSA(algo string) bool {
	return contains(algos, underlyingAlgo(algo))
}

func isRSACert(algo string) bool {
	_, ok := certKeyAlgoNames[algo]
	if !ok {
		return false
	}
	return isRSA(algo)
}

// supportedPubKeyAuthAlgos specifies the supported client public key
// authentication algorithms. Note that this doesn't include certificate types
// since those use the underlying algorithm. This list is sent to the client if
@@ -140,8 +147,6 @@ var supportedPubKeyAuthAlgos = []string{
	KeyAlgoDSA,
}

var supportedPubKeyAuthAlgosList = strings.Join(supportedPubKeyAuthAlgos, ",")

// unexpectedMessageError results when the SSH message that we received didn't
// match what we wanted.
func unexpectedMessageError(expected, got uint8) error {
1  vendor/golang.org/x/crypto/ssh/doc.go (generated, vendored)
@@ -13,6 +13,7 @@ others.

References:

	[PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD
	[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
	[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
107  vendor/golang.org/x/crypto/ssh/handshake.go (generated, vendored)
@@ -11,6 +11,7 @@ import (
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
@@ -34,6 +35,16 @@ type keyingTransport interface {
|
||||
// direction will be effected if a msgNewKeys message is sent
|
||||
// or received.
|
||||
prepareKeyChange(*algorithms, *kexResult) error
|
||||
|
||||
// setStrictMode sets the strict KEX mode, notably triggering
|
||||
// sequence number resets on sending or receiving msgNewKeys.
|
||||
// If the sequence number is already > 1 when setStrictMode
|
||||
// is called, an error is returned.
|
||||
setStrictMode() error
|
||||
|
||||
// setInitialKEXDone indicates to the transport that the initial key exchange
|
||||
// was completed
|
||||
setInitialKEXDone()
|
||||
}
|
||||
|
||||
// handshakeTransport implements rekeying on top of a keyingTransport
|
||||
@@ -50,6 +61,10 @@ type handshakeTransport struct {
|
||||
// connection.
|
||||
hostKeys []Signer
|
||||
|
||||
// publicKeyAuthAlgorithms is non-empty if we are the server. In that case,
|
||||
// it contains the supported client public key authentication algorithms.
|
||||
publicKeyAuthAlgorithms []string
|
||||
|
||||
// hostKeyAlgorithms is non-empty if we are the client. In that case,
|
||||
// we accept these key types from the server as host key.
|
||||
hostKeyAlgorithms []string
|
||||
@@ -95,6 +110,10 @@ type handshakeTransport struct {
|
||||
|
||||
// The session ID or nil if first kex did not complete yet.
|
||||
sessionID []byte
|
||||
|
||||
// strictMode indicates if the other side of the handshake indicated
|
||||
// that we should be following the strict KEX protocol restrictions.
|
||||
strictMode bool
|
||||
}
|
||||
|
||||
type pendingKex struct {
|
||||
@@ -141,6 +160,7 @@ func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byt
|
||||
func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport {
|
||||
t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
|
||||
t.hostKeys = config.hostKeys
|
||||
t.publicKeyAuthAlgorithms = config.PublicKeyAuthAlgorithms
|
||||
go t.readLoop()
|
||||
go t.kexLoop()
|
||||
return t
|
||||
@@ -203,7 +223,10 @@ func (t *handshakeTransport) readLoop() {
|
||||
close(t.incoming)
|
||||
break
|
||||
}
|
||||
if p[0] == msgIgnore || p[0] == msgDebug {
|
||||
// If this is the first kex, and strict KEX mode is enabled,
|
||||
// we don't ignore any messages, as they may be used to manipulate
|
||||
// the packet sequence numbers.
|
||||
if !(t.sessionID == nil && t.strictMode) && (p[0] == msgIgnore || p[0] == msgDebug) {
|
||||
continue
|
||||
}
|
||||
t.incoming <- p
|
||||
@@ -435,6 +458,11 @@ func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) {
|
||||
return successPacket, nil
|
||||
}
|
||||
|
||||
const (
|
||||
kexStrictClient = "kex-strict-c-v00@openssh.com"
|
||||
kexStrictServer = "kex-strict-s-v00@openssh.com"
|
||||
)
|
||||
|
||||
// sendKexInit sends a key change message.
|
||||
func (t *handshakeTransport) sendKexInit() error {
|
||||
t.mu.Lock()
|
||||
@@ -448,7 +476,6 @@ func (t *handshakeTransport) sendKexInit() error {
|
||||
}
|
||||
|
||||
msg := &kexInitMsg{
|
||||
KexAlgos: t.config.KeyExchanges,
|
||||
CiphersClientServer: t.config.Ciphers,
|
||||
CiphersServerClient: t.config.Ciphers,
|
||||
MACsClientServer: t.config.MACs,
|
||||
@@ -458,36 +485,55 @@ func (t *handshakeTransport) sendKexInit() error {
|
||||
}
|
||||
io.ReadFull(rand.Reader, msg.Cookie[:])
|
||||
|
||||
// We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm,
|
||||
// and possibly to add the ext-info extension algorithm. Since the slice may be the
|
||||
// user owned KeyExchanges, we create our own slice in order to avoid using user
|
||||
// owned memory by mistake.
|
||||
msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+2) // room for kex-strict and ext-info
|
||||
msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...)

isServer := len(t.hostKeys) > 0
if isServer {
for _, k := range t.hostKeys {
// If k is an AlgorithmSigner, presume it supports all signature algorithms
// associated with the key format. (Ideally AlgorithmSigner would have a
// method to advertise supported algorithms, but it doesn't. This means that
// adding support for a new algorithm is a breaking change, as we will
// immediately negotiate it even if existing implementations don't support
// it. If that ever happens, we'll have to figure something out.)
// If k is not an AlgorithmSigner, we can only assume it only supports the
// algorithms that matches the key format. (This means that Sign can't pick
// a different default.)
// If k is a MultiAlgorithmSigner, we restrict the signature
// algorithms. If k is a AlgorithmSigner, presume it supports all
// signature algorithms associated with the key format. If k is not
// an AlgorithmSigner, we can only assume it only supports the
// algorithms that matches the key format. (This means that Sign
// can't pick a different default).
keyFormat := k.PublicKey().Type()
if _, ok := k.(AlgorithmSigner); ok {

switch s := k.(type) {
case MultiAlgorithmSigner:
for _, algo := range algorithmsForKeyFormat(keyFormat) {
if contains(s.Algorithms(), underlyingAlgo(algo)) {
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo)
}
}
case AlgorithmSigner:
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...)
} else {
default:
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat)
}
}

if t.sessionID == nil {
msg.KexAlgos = append(msg.KexAlgos, kexStrictServer)
}
} else {
msg.ServerHostKeyAlgos = t.hostKeyAlgorithms

// As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what
// algorithms the server supports for public key authentication. See RFC
// 8308, Section 2.1.
//
// We also send the strict KEX mode extension algorithm, in order to opt
// into the strict KEX mode.
if firstKeyExchange := t.sessionID == nil; firstKeyExchange {
msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1)
msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...)
msg.KexAlgos = append(msg.KexAlgos, "ext-info-c")
msg.KexAlgos = append(msg.KexAlgos, kexStrictClient)
}

}

packet := Marshal(msg)
@@ -593,6 +639,13 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
return err
}

if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) {
t.strictMode = true
if err := t.conn.setStrictMode(); err != nil {
return err
}
}

// We don't send FirstKexFollows, but we handle receiving it.
//
// RFC 4253 section 7 defines the kex and the agreement method for
@@ -642,16 +695,21 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {

// On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO
// message with the server-sig-algs extension if the client supports it. See
// RFC 8308, Sections 2.4 and 3.1.
// RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9.
if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") {
supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",")
extInfo := &extInfoMsg{
NumExtensions: 1,
Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)),
NumExtensions: 2,
Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)+4+16+4+1),
}
extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs"))
extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...)
extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList))
extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...)
extInfo.Payload = appendInt(extInfo.Payload, len("ping@openssh.com"))
extInfo.Payload = append(extInfo.Payload, "ping@openssh.com"...)
extInfo.Payload = appendInt(extInfo.Payload, 1)
extInfo.Payload = append(extInfo.Payload, "0"...)
if err := t.conn.writePacket(Marshal(extInfo)); err != nil {
return err
}
@@ -663,6 +721,12 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
return unexpectedMessageError(msgNewKeys, packet[0])
}

if firstKeyExchange {
// Indicates to the transport that the first key exchange is completed
// after receiving SSH_MSG_NEWKEYS.
t.conn.setInitialKEXDone()
}

return nil
}

@@ -685,9 +749,16 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a

func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
for _, k := range hostKeys {
if s, ok := k.(MultiAlgorithmSigner); ok {
if !contains(s.Algorithms(), underlyingAlgo(algo)) {
continue
}
}

if algo == k.PublicKey().Type() {
return algorithmSignerWrapper{k}
}

k, ok := k.(AlgorithmSigner)
if !ok {
continue
395
vendor/golang.org/x/crypto/ssh/keys.go
generated
vendored
@@ -11,13 +11,16 @@ import (
|
||||
"crypto/cipher"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
@@ -26,7 +29,6 @@ import (
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
|
||||
)
|
||||
|
||||
@@ -295,6 +297,18 @@ func MarshalAuthorizedKey(key PublicKey) []byte {
return b.Bytes()
}

// MarshalPrivateKey returns a PEM block with the private key serialized in the
// OpenSSH format.
func MarshalPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error) {
return marshalOpenSSHPrivateKey(key, comment, unencryptedOpenSSHMarshaler)
}

// MarshalPrivateKeyWithPassphrase returns a PEM block holding the encrypted
// private key serialized in the OpenSSH format.
func MarshalPrivateKeyWithPassphrase(key crypto.PrivateKey, comment string, passphrase []byte) (*pem.Block, error) {
return marshalOpenSSHPrivateKey(key, comment, passphraseProtectedOpenSSHMarshaler(passphrase))
}

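A hypothetical sketch, not part of this diff, of how the new marshalling helpers might be used to write a freshly generated key in the OpenSSH PEM format; the output path and key comment are illustrative only:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/pem"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Generate an Ed25519 key and serialize it with the new MarshalPrivateKey.
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	block, err := ssh.MarshalPrivateKey(priv, "example@host")
	if err != nil {
		log.Fatal(err)
	}
	// pem.EncodeToMemory produces the familiar
	// "-----BEGIN OPENSSH PRIVATE KEY-----" text form.
	if err := os.WriteFile("id_ed25519", pem.EncodeToMemory(block), 0o600); err != nil {
		log.Fatal(err)
	}
}
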
// PublicKey represents a public key using an unspecified algorithm.
|
||||
//
|
||||
// Some PublicKeys provided by this package also implement CryptoPublicKey.
|
||||
@@ -321,7 +335,7 @@ type CryptoPublicKey interface {
|
||||
|
||||
// A Signer can create signatures that verify against a public key.
|
||||
//
|
||||
// Some Signers provided by this package also implement AlgorithmSigner.
|
||||
// Some Signers provided by this package also implement MultiAlgorithmSigner.
|
||||
type Signer interface {
|
||||
// PublicKey returns the associated PublicKey.
|
||||
PublicKey() PublicKey
|
||||
@@ -336,9 +350,9 @@ type Signer interface {
|
||||
// An AlgorithmSigner is a Signer that also supports specifying an algorithm to
|
||||
// use for signing.
|
||||
//
|
||||
// An AlgorithmSigner can't advertise the algorithms it supports, so it should
|
||||
// be prepared to be invoked with every algorithm supported by the public key
|
||||
// format.
|
||||
// An AlgorithmSigner can't advertise the algorithms it supports, unless it also
|
||||
// implements MultiAlgorithmSigner, so it should be prepared to be invoked with
|
||||
// every algorithm supported by the public key format.
|
||||
type AlgorithmSigner interface {
|
||||
Signer
|
||||
|
||||
@@ -349,6 +363,75 @@ type AlgorithmSigner interface {
|
||||
SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error)
|
||||
}

// MultiAlgorithmSigner is an AlgorithmSigner that also reports the algorithms
// supported by that signer.
type MultiAlgorithmSigner interface {
AlgorithmSigner

// Algorithms returns the available algorithms in preference order. The list
// must not be empty, and it must not include certificate types.
Algorithms() []string
}

// NewSignerWithAlgorithms returns a signer restricted to the specified
// algorithms. The algorithms must be set in preference order. The list must not
// be empty, and it must not include certificate types. An error is returned if
// the specified algorithms are incompatible with the public key type.
func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (MultiAlgorithmSigner, error) {
if len(algorithms) == 0 {
return nil, errors.New("ssh: please specify at least one valid signing algorithm")
}
var signerAlgos []string
supportedAlgos := algorithmsForKeyFormat(underlyingAlgo(signer.PublicKey().Type()))
if s, ok := signer.(*multiAlgorithmSigner); ok {
signerAlgos = s.Algorithms()
} else {
signerAlgos = supportedAlgos
}

for _, algo := range algorithms {
if !contains(supportedAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q",
algo, signer.PublicKey().Type())
}
if !contains(signerAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo)
}
}
return &multiAlgorithmSigner{
AlgorithmSigner: signer,
supportedAlgorithms: algorithms,
}, nil
}

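A hypothetical usage sketch, not part of this diff: restricting an RSA host key to the SHA-2 signature algorithms via NewSignerWithAlgorithms, so the legacy ssh-rsa (SHA-1) algorithm is never advertised for that key. Key size and error handling are illustrative only.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 3072)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromKey(rsaKey)
	if err != nil {
		log.Fatal(err)
	}
	// The RSA signer returned by NewSignerFromKey implements AlgorithmSigner,
	// so it can be wrapped into a MultiAlgorithmSigner limited to SHA-2.
	restricted, err := ssh.NewSignerWithAlgorithms(signer.(ssh.AlgorithmSigner),
		[]string{ssh.KeyAlgoRSASHA256, ssh.KeyAlgoRSASHA512})
	if err != nil {
		log.Fatal(err)
	}
	cfg := &ssh.ServerConfig{NoClientAuth: true}
	cfg.AddHostKey(restricted) // only rsa-sha2-256/512 are negotiated for this key
	_ = cfg
}
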
type multiAlgorithmSigner struct {
|
||||
AlgorithmSigner
|
||||
supportedAlgorithms []string
|
||||
}
|
||||
|
||||
func (s *multiAlgorithmSigner) Algorithms() []string {
|
||||
return s.supportedAlgorithms
|
||||
}
|
||||
|
||||
func (s *multiAlgorithmSigner) isAlgorithmSupported(algorithm string) bool {
|
||||
if algorithm == "" {
|
||||
algorithm = underlyingAlgo(s.PublicKey().Type())
|
||||
}
|
||||
for _, algo := range s.supportedAlgorithms {
|
||||
if algorithm == algo {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *multiAlgorithmSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
|
||||
if !s.isAlgorithmSupported(algorithm) {
|
||||
return nil, fmt.Errorf("ssh: algorithm %q is not supported: %v", algorithm, s.supportedAlgorithms)
|
||||
}
|
||||
return s.AlgorithmSigner.SignWithAlgorithm(rand, data, algorithm)
|
||||
}
|
||||
|
||||
type rsaPublicKey rsa.PublicKey
|
||||
|
||||
func (r *rsaPublicKey) Type() string {
|
||||
@@ -512,6 +595,10 @@ func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
|
||||
return k.SignWithAlgorithm(rand, data, k.PublicKey().Type())
|
||||
}
|
||||
|
||||
func (k *dsaPrivateKey) Algorithms() []string {
|
||||
return []string{k.PublicKey().Type()}
|
||||
}
|
||||
|
||||
func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
|
||||
if algorithm != "" && algorithm != k.PublicKey().Type() {
|
||||
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
|
||||
@@ -961,13 +1048,16 @@ func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
|
||||
return s.SignWithAlgorithm(rand, data, s.pubKey.Type())
|
||||
}
|
||||
|
||||
func (s *wrappedSigner) Algorithms() []string {
|
||||
return algorithmsForKeyFormat(s.pubKey.Type())
|
||||
}
|
||||
|
||||
func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
|
||||
if algorithm == "" {
|
||||
algorithm = s.pubKey.Type()
|
||||
}
|
||||
|
||||
supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type())
|
||||
if !contains(supportedAlgos, algorithm) {
|
||||
if !contains(s.Algorithms(), algorithm) {
|
||||
return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
|
||||
}
|
||||
|
||||
@@ -1142,16 +1232,27 @@ func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{},
|
||||
return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err)
|
||||
}
|
||||
|
||||
var result interface{}
|
||||
|
||||
switch block.Type {
|
||||
case "RSA PRIVATE KEY":
|
||||
return x509.ParsePKCS1PrivateKey(buf)
|
||||
result, err = x509.ParsePKCS1PrivateKey(buf)
|
||||
case "EC PRIVATE KEY":
|
||||
return x509.ParseECPrivateKey(buf)
|
||||
result, err = x509.ParseECPrivateKey(buf)
|
||||
case "DSA PRIVATE KEY":
|
||||
return ParseDSAPrivateKey(buf)
|
||||
result, err = ParseDSAPrivateKey(buf)
|
||||
default:
|
||||
return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
|
||||
err = fmt.Errorf("ssh: unsupported key type %q", block.Type)
|
||||
}
|
||||
// Because of deficiencies in the format, DecryptPEMBlock does not always
|
||||
// detect an incorrect password. In these cases decrypted DER bytes is
|
||||
// random noise. If the parsing of the key returns an asn1.StructuralError
|
||||
// we return x509.IncorrectPasswordError.
|
||||
if _, ok := err.(asn1.StructuralError); ok {
|
||||
return nil, x509.IncorrectPasswordError
|
||||
}
|
||||
|
||||
return result, err
|
||||
}
|
||||
|
||||
// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as
|
||||
@@ -1241,28 +1342,106 @@ func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc {
|
||||
}
|
||||
}
|
||||
|
||||
func unencryptedOpenSSHMarshaler(privKeyBlock []byte) ([]byte, string, string, string, error) {
|
||||
key := generateOpenSSHPadding(privKeyBlock, 8)
|
||||
return key, "none", "none", "", nil
|
||||
}
|
||||
|
||||
func passphraseProtectedOpenSSHMarshaler(passphrase []byte) openSSHEncryptFunc {
|
||||
return func(privKeyBlock []byte) ([]byte, string, string, string, error) {
|
||||
salt := make([]byte, 16)
|
||||
if _, err := rand.Read(salt); err != nil {
|
||||
return nil, "", "", "", err
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
Salt []byte
|
||||
Rounds uint32
|
||||
}{salt, 16}
|
||||
|
||||
// Derive key to encrypt the private key block.
|
||||
k, err := bcrypt_pbkdf.Key(passphrase, salt, int(opts.Rounds), 32+aes.BlockSize)
|
||||
if err != nil {
|
||||
return nil, "", "", "", err
|
||||
}
|
||||
|
||||
// Add padding matching the block size of AES.
|
||||
keyBlock := generateOpenSSHPadding(privKeyBlock, aes.BlockSize)
|
||||
|
||||
// Encrypt the private key using the derived secret.
|
||||
|
||||
dst := make([]byte, len(keyBlock))
|
||||
key, iv := k[:32], k[32:]
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, "", "", "", err
|
||||
}
|
||||
|
||||
stream := cipher.NewCTR(block, iv)
|
||||
stream.XORKeyStream(dst, keyBlock)
|
||||
|
||||
return dst, "aes256-ctr", "bcrypt", string(Marshal(opts)), nil
|
||||
}
|
||||
}
|
||||
|
||||
const privateKeyAuthMagic = "openssh-key-v1\x00"
|
||||
|
||||
type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error)
|
||||
type openSSHEncryptFunc func(PrivKeyBlock []byte) (ProtectedKeyBlock []byte, cipherName, kdfName, kdfOptions string, err error)
|
||||
|
||||
type openSSHEncryptedPrivateKey struct {
|
||||
CipherName string
|
||||
KdfName string
|
||||
KdfOpts string
|
||||
NumKeys uint32
|
||||
PubKey []byte
|
||||
PrivKeyBlock []byte
|
||||
}
|
||||
|
||||
type openSSHPrivateKey struct {
|
||||
Check1 uint32
|
||||
Check2 uint32
|
||||
Keytype string
|
||||
Rest []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
type openSSHRSAPrivateKey struct {
|
||||
N *big.Int
|
||||
E *big.Int
|
||||
D *big.Int
|
||||
Iqmp *big.Int
|
||||
P *big.Int
|
||||
Q *big.Int
|
||||
Comment string
|
||||
Pad []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
type openSSHEd25519PrivateKey struct {
|
||||
Pub []byte
|
||||
Priv []byte
|
||||
Comment string
|
||||
Pad []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
type openSSHECDSAPrivateKey struct {
|
||||
Curve string
|
||||
Pub []byte
|
||||
D *big.Int
|
||||
Comment string
|
||||
Pad []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt
|
||||
// function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used
|
||||
// as the decrypt function to parse an unencrypted private key. See
|
||||
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key.
|
||||
func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) {
|
||||
const magic = "openssh-key-v1\x00"
|
||||
if len(key) < len(magic) || string(key[:len(magic)]) != magic {
|
||||
if len(key) < len(privateKeyAuthMagic) || string(key[:len(privateKeyAuthMagic)]) != privateKeyAuthMagic {
|
||||
return nil, errors.New("ssh: invalid openssh private key format")
|
||||
}
|
||||
remaining := key[len(magic):]
|
||||
|
||||
var w struct {
|
||||
CipherName string
|
||||
KdfName string
|
||||
KdfOpts string
|
||||
NumKeys uint32
|
||||
PubKey []byte
|
||||
PrivKeyBlock []byte
|
||||
}
|
||||
remaining := key[len(privateKeyAuthMagic):]
|
||||
|
||||
var w openSSHEncryptedPrivateKey
|
||||
if err := Unmarshal(remaining, &w); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1284,13 +1463,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pk1 := struct {
|
||||
Check1 uint32
|
||||
Check2 uint32
|
||||
Keytype string
|
||||
Rest []byte `ssh:"rest"`
|
||||
}{}
|
||||
|
||||
var pk1 openSSHPrivateKey
|
||||
if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 {
|
||||
if w.CipherName != "none" {
|
||||
return nil, x509.IncorrectPasswordError
|
||||
@@ -1300,18 +1473,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
|
||||
|
||||
switch pk1.Keytype {
|
||||
case KeyAlgoRSA:
|
||||
// https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773
|
||||
key := struct {
|
||||
N *big.Int
|
||||
E *big.Int
|
||||
D *big.Int
|
||||
Iqmp *big.Int
|
||||
P *big.Int
|
||||
Q *big.Int
|
||||
Comment string
|
||||
Pad []byte `ssh:"rest"`
|
||||
}{}
|
||||
|
||||
var key openSSHRSAPrivateKey
|
||||
if err := Unmarshal(pk1.Rest, &key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1337,13 +1499,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
|
||||
|
||||
return pk, nil
|
||||
case KeyAlgoED25519:
|
||||
key := struct {
|
||||
Pub []byte
|
||||
Priv []byte
|
||||
Comment string
|
||||
Pad []byte `ssh:"rest"`
|
||||
}{}
|
||||
|
||||
var key openSSHEd25519PrivateKey
|
||||
if err := Unmarshal(pk1.Rest, &key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1360,14 +1516,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
|
||||
copy(pk, key.Priv)
|
||||
return &pk, nil
|
||||
case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
|
||||
key := struct {
|
||||
Curve string
|
||||
Pub []byte
|
||||
D *big.Int
|
||||
Comment string
|
||||
Pad []byte `ssh:"rest"`
|
||||
}{}
|
||||
|
||||
var key openSSHECDSAPrivateKey
|
||||
if err := Unmarshal(pk1.Rest, &key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1415,6 +1564,131 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
|
||||
}
|
||||
}
|
||||
|
||||
func marshalOpenSSHPrivateKey(key crypto.PrivateKey, comment string, encrypt openSSHEncryptFunc) (*pem.Block, error) {
|
||||
var w openSSHEncryptedPrivateKey
|
||||
var pk1 openSSHPrivateKey
|
||||
|
||||
// Random check bytes.
|
||||
var check uint32
|
||||
if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pk1.Check1 = check
|
||||
pk1.Check2 = check
|
||||
w.NumKeys = 1
|
||||
|
||||
// Use a []byte directly on ed25519 keys.
|
||||
if k, ok := key.(*ed25519.PrivateKey); ok {
|
||||
key = *k
|
||||
}
|
||||
|
||||
switch k := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
E := new(big.Int).SetInt64(int64(k.PublicKey.E))
|
||||
// Marshal public key:
|
||||
// E and N are in reversed order in the public and private key.
|
||||
pubKey := struct {
|
||||
KeyType string
|
||||
E *big.Int
|
||||
N *big.Int
|
||||
}{
|
||||
KeyAlgoRSA,
|
||||
E, k.PublicKey.N,
|
||||
}
|
||||
w.PubKey = Marshal(pubKey)
|
||||
|
||||
// Marshal private key.
|
||||
key := openSSHRSAPrivateKey{
|
||||
N: k.PublicKey.N,
|
||||
E: E,
|
||||
D: k.D,
|
||||
Iqmp: k.Precomputed.Qinv,
|
||||
P: k.Primes[0],
|
||||
Q: k.Primes[1],
|
||||
Comment: comment,
|
||||
}
|
||||
pk1.Keytype = KeyAlgoRSA
|
||||
pk1.Rest = Marshal(key)
|
||||
case ed25519.PrivateKey:
|
||||
pub := make([]byte, ed25519.PublicKeySize)
|
||||
priv := make([]byte, ed25519.PrivateKeySize)
|
||||
copy(pub, k[32:])
|
||||
copy(priv, k)
|
||||
|
||||
// Marshal public key.
|
||||
pubKey := struct {
|
||||
KeyType string
|
||||
Pub []byte
|
||||
}{
|
||||
KeyAlgoED25519, pub,
|
||||
}
|
||||
w.PubKey = Marshal(pubKey)
|
||||
|
||||
// Marshal private key.
|
||||
key := openSSHEd25519PrivateKey{
|
||||
Pub: pub,
|
||||
Priv: priv,
|
||||
Comment: comment,
|
||||
}
|
||||
pk1.Keytype = KeyAlgoED25519
|
||||
pk1.Rest = Marshal(key)
|
||||
case *ecdsa.PrivateKey:
|
||||
var curve, keyType string
|
||||
switch name := k.Curve.Params().Name; name {
|
||||
case "P-256":
|
||||
curve = "nistp256"
|
||||
keyType = KeyAlgoECDSA256
|
||||
case "P-384":
|
||||
curve = "nistp384"
|
||||
keyType = KeyAlgoECDSA384
|
||||
case "P-521":
|
||||
curve = "nistp521"
|
||||
keyType = KeyAlgoECDSA521
|
||||
default:
|
||||
return nil, errors.New("ssh: unhandled elliptic curve " + name)
|
||||
}
|
||||
|
||||
pub := elliptic.Marshal(k.Curve, k.PublicKey.X, k.PublicKey.Y)
|
||||
|
||||
// Marshal public key.
|
||||
pubKey := struct {
|
||||
KeyType string
|
||||
Curve string
|
||||
Pub []byte
|
||||
}{
|
||||
keyType, curve, pub,
|
||||
}
|
||||
w.PubKey = Marshal(pubKey)
|
||||
|
||||
// Marshal private key.
|
||||
key := openSSHECDSAPrivateKey{
|
||||
Curve: curve,
|
||||
Pub: pub,
|
||||
D: k.D,
|
||||
Comment: comment,
|
||||
}
|
||||
pk1.Keytype = keyType
|
||||
pk1.Rest = Marshal(key)
|
||||
default:
|
||||
return nil, fmt.Errorf("ssh: unsupported key type %T", k)
|
||||
}
|
||||
|
||||
var err error
|
||||
// Add padding and encrypt the key if necessary.
|
||||
w.PrivKeyBlock, w.CipherName, w.KdfName, w.KdfOpts, err = encrypt(Marshal(pk1))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b := Marshal(w)
|
||||
block := &pem.Block{
|
||||
Type: "OPENSSH PRIVATE KEY",
|
||||
Bytes: append([]byte(privateKeyAuthMagic), b...),
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
func checkOpenSSHKeyPadding(pad []byte) error {
|
||||
for i, b := range pad {
|
||||
if int(b) != i+1 {
|
||||
@@ -1424,6 +1698,13 @@ func checkOpenSSHKeyPadding(pad []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateOpenSSHPadding(block []byte, blockSize int) []byte {
|
||||
for i, l := 0, len(block); (l+i)%blockSize != 0; i++ {
|
||||
block = append(block, byte(i+1))
|
||||
}
|
||||
return block
|
||||
}
|
||||
|
||||
// FingerprintLegacyMD5 returns the user presentation of the key's
|
||||
// fingerprint as described by RFC 4716 section 4.
|
||||
func FingerprintLegacyMD5(pubKey PublicKey) string {
|
||||
|
||||
14
vendor/golang.org/x/crypto/ssh/messages.go
generated
vendored
@@ -349,6 +349,20 @@ type userAuthGSSAPIError struct {
LanguageTag string
}

// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9
const msgPing = 192

type pingMsg struct {
Data string `sshtype:"192"`
}

// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9
const msgPong = 193

type pongMsg struct {
Data string `sshtype:"193"`
}

// typeTags returns the possible type bytes for the given reflect.Type, which
// should be a struct. The possible values are separated by a '|' character.
func typeTags(structType reflect.Type) (tags []byte) {
6
vendor/golang.org/x/crypto/ssh/mux.go
generated
vendored
@@ -231,6 +231,12 @@ func (m *mux) onePacket() error {
return m.handleChannelOpen(packet)
case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
return m.handleGlobalPacket(packet)
case msgPing:
var msg pingMsg
if err := Unmarshal(packet, &msg); err != nil {
return fmt.Errorf("failed to unmarshal ping@openssh.com message: %w", err)
}
return m.sendMessage(pongMsg(msg))
}

// assume a channel packet.
208
vendor/golang.org/x/crypto/ssh/server.go
generated
vendored
@@ -64,6 +64,13 @@ type ServerConfig struct {
|
||||
// Config contains configuration shared between client and server.
|
||||
Config
|
||||
|
||||
// PublicKeyAuthAlgorithms specifies the supported client public key
|
||||
// authentication algorithms. Note that this should not include certificate
|
||||
// types since those use the underlying algorithm. This list is sent to the
|
||||
// client if it supports the server-sig-algs extension. Order is irrelevant.
|
||||
// If unspecified then a default set of algorithms is used.
|
||||
PublicKeyAuthAlgorithms []string
|
||||
|
||||
hostKeys []Signer
|
||||
|
||||
// NoClientAuth is true if clients are allowed to connect without
|
||||
@@ -201,9 +208,20 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha
|
||||
if fullConf.MaxAuthTries == 0 {
|
||||
fullConf.MaxAuthTries = 6
|
||||
}
|
||||
if len(fullConf.PublicKeyAuthAlgorithms) == 0 {
|
||||
fullConf.PublicKeyAuthAlgorithms = supportedPubKeyAuthAlgos
|
||||
} else {
|
||||
for _, algo := range fullConf.PublicKeyAuthAlgorithms {
|
||||
if !contains(supportedPubKeyAuthAlgos, algo) {
|
||||
c.Close()
|
||||
return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Check if the config contains any unsupported key exchanges
|
||||
for _, kex := range fullConf.KeyExchanges {
|
||||
if _, ok := serverForbiddenKexAlgos[kex]; ok {
|
||||
c.Close()
|
||||
return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex)
|
||||
}
|
||||
}
|
||||
@@ -321,7 +339,7 @@ func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
|
||||
return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
|
||||
}
|
||||
|
||||
func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection,
|
||||
func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, token []byte, s *connection,
|
||||
sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms *Permissions, err error) {
|
||||
gssAPIServer := gssapiConfig.Server
|
||||
defer gssAPIServer.DeleteSecContext()
|
||||
@@ -331,7 +349,7 @@ func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *c
|
||||
outToken []byte
|
||||
needContinue bool
|
||||
)
|
||||
outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(firstToken)
|
||||
outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(token)
|
||||
if err != nil {
|
||||
return err, nil, nil
|
||||
}
|
||||
@@ -353,6 +371,7 @@ func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *c
|
||||
if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
token = userAuthGSSAPITokenReq.Token
|
||||
}
|
||||
packet, err := s.transport.readPacket()
|
||||
if err != nil {
|
||||
@@ -407,6 +426,35 @@ func (l ServerAuthError) Error() string {
|
||||
return "[" + strings.Join(errs, ", ") + "]"
|
||||
}

// ServerAuthCallbacks defines server-side authentication callbacks.
type ServerAuthCallbacks struct {
// PasswordCallback behaves like [ServerConfig.PasswordCallback].
PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)

// PublicKeyCallback behaves like [ServerConfig.PublicKeyCallback].
PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)

// KeyboardInteractiveCallback behaves like [ServerConfig.KeyboardInteractiveCallback].
KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)

// GSSAPIWithMICConfig behaves like [ServerConfig.GSSAPIWithMICConfig].
GSSAPIWithMICConfig *GSSAPIWithMICConfig
}

// PartialSuccessError can be returned by any of the [ServerConfig]
// authentication callbacks to indicate to the client that authentication has
// partially succeeded, but further steps are required.
type PartialSuccessError struct {
// Next defines the authentication callbacks to apply to further steps. The
// available methods communicated to the client are based on the non-nil
// ServerAuthCallbacks fields.
Next ServerAuthCallbacks
}

func (p *PartialSuccessError) Error() string {
return "ssh: authenticated with partial success"
}

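A hypothetical sketch of how a server might use PartialSuccessError for two-step authentication (public key first, then a keyboard-interactive one-time password); the verifyKey and checkOTP helpers are placeholders, not part of the package:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// Placeholders standing in for real credential checks.
func verifyKey(user string, key ssh.PublicKey) bool { return false }
func checkOTP(user, otp string) bool                { return false }

func newConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
			if !verifyKey(conn.User(), key) {
				return nil, fmt.Errorf("unknown public key for %q", conn.User())
			}
			// The key checks out, but demand a second factor before
			// authentication completes.
			return nil, &ssh.PartialSuccessError{
				Next: ssh.ServerAuthCallbacks{
					KeyboardInteractiveCallback: func(conn ssh.ConnMetadata, challenge ssh.KeyboardInteractiveChallenge) (*ssh.Permissions, error) {
						answers, err := challenge(conn.User(), "", []string{"One-time password: "}, []bool{false})
						if err != nil {
							return nil, err
						}
						if len(answers) != 1 || !checkOTP(conn.User(), answers[0]) {
							return nil, errors.New("invalid one-time password")
						}
						return nil, nil
					},
				},
			}
		},
	}
}
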
// ErrNoAuth is the error value returned if no
|
||||
// authentication method has been passed yet. This happens as a normal
|
||||
// part of the authentication loop, since the client first tries
|
||||
@@ -420,8 +468,18 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err
|
||||
var perms *Permissions
|
||||
|
||||
authFailures := 0
|
||||
noneAuthCount := 0
|
||||
var authErrs []error
|
||||
var displayedBanner bool
|
||||
partialSuccessReturned := false
|
||||
// Set the initial authentication callbacks from the config. They can be
|
||||
// changed if a PartialSuccessError is returned.
|
||||
authConfig := ServerAuthCallbacks{
|
||||
PasswordCallback: config.PasswordCallback,
|
||||
PublicKeyCallback: config.PublicKeyCallback,
|
||||
KeyboardInteractiveCallback: config.KeyboardInteractiveCallback,
|
||||
GSSAPIWithMICConfig: config.GSSAPIWithMICConfig,
|
||||
}
|
||||
|
||||
userAuthLoop:
|
||||
for {
|
||||
@@ -452,6 +510,11 @@ userAuthLoop:
|
||||
return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
|
||||
}
|
||||
|
||||
if s.user != userAuthReq.User && partialSuccessReturned {
|
||||
return nil, fmt.Errorf("ssh: client changed the user after a partial success authentication, previous user %q, current user %q",
|
||||
s.user, userAuthReq.User)
|
||||
}
|
||||
|
||||
s.user = userAuthReq.User
|
||||
|
||||
if !displayedBanner && config.BannerCallback != nil {
|
||||
@@ -472,20 +535,18 @@ userAuthLoop:
|
||||
|
||||
switch userAuthReq.Method {
|
||||
case "none":
|
||||
if config.NoClientAuth {
|
||||
noneAuthCount++
|
||||
// We don't allow none authentication after a partial success
|
||||
// response.
|
||||
if config.NoClientAuth && !partialSuccessReturned {
|
||||
if config.NoClientAuthCallback != nil {
|
||||
perms, authErr = config.NoClientAuthCallback(s)
|
||||
} else {
|
||||
authErr = nil
|
||||
}
|
||||
}
|
||||
|
||||
// allow initial attempt of 'none' without penalty
|
||||
if authFailures == 0 {
|
||||
authFailures--
|
||||
}
|
||||
case "password":
|
||||
if config.PasswordCallback == nil {
|
||||
if authConfig.PasswordCallback == nil {
|
||||
authErr = errors.New("ssh: password auth not configured")
|
||||
break
|
||||
}
|
||||
@@ -499,17 +560,17 @@ userAuthLoop:
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
|
||||
perms, authErr = config.PasswordCallback(s, password)
|
||||
perms, authErr = authConfig.PasswordCallback(s, password)
|
||||
case "keyboard-interactive":
|
||||
if config.KeyboardInteractiveCallback == nil {
|
||||
if authConfig.KeyboardInteractiveCallback == nil {
|
||||
authErr = errors.New("ssh: keyboard-interactive auth not configured")
|
||||
break
|
||||
}
|
||||
|
||||
prompter := &sshClientKeyboardInteractive{s}
|
||||
perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
|
||||
perms, authErr = authConfig.KeyboardInteractiveCallback(s, prompter.Challenge)
|
||||
case "publickey":
|
||||
if config.PublicKeyCallback == nil {
|
||||
if authConfig.PublicKeyCallback == nil {
|
||||
authErr = errors.New("ssh: publickey auth not configured")
|
||||
break
|
||||
}
|
||||
@@ -524,7 +585,7 @@ userAuthLoop:
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
algo := string(algoBytes)
|
||||
if !contains(supportedPubKeyAuthAlgos, underlyingAlgo(algo)) {
|
||||
if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
|
||||
authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
|
||||
break
|
||||
}
|
||||
@@ -543,11 +604,18 @@ userAuthLoop:
|
||||
if !ok {
|
||||
candidate.user = s.user
|
||||
candidate.pubKeyData = pubKeyData
|
||||
candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey)
|
||||
if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
|
||||
candidate.result = checkSourceAddress(
|
||||
candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey)
|
||||
_, isPartialSuccessError := candidate.result.(*PartialSuccessError)
|
||||
|
||||
if (candidate.result == nil || isPartialSuccessError) &&
|
||||
candidate.perms != nil &&
|
||||
candidate.perms.CriticalOptions != nil &&
|
||||
candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
|
||||
if err := checkSourceAddress(
|
||||
s.RemoteAddr(),
|
||||
candidate.perms.CriticalOptions[sourceAddressCriticalOption])
|
||||
candidate.perms.CriticalOptions[sourceAddressCriticalOption]); err != nil {
|
||||
candidate.result = err
|
||||
}
|
||||
}
|
||||
cache.add(candidate)
|
||||
}
|
||||
@@ -559,8 +627,8 @@ userAuthLoop:
|
||||
if len(payload) > 0 {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
|
||||
if candidate.result == nil {
|
||||
_, isPartialSuccessError := candidate.result.(*PartialSuccessError)
|
||||
if candidate.result == nil || isPartialSuccessError {
|
||||
okMsg := userAuthPubKeyOkMsg{
|
||||
Algo: algo,
|
||||
PubKey: pubKeyData,
|
||||
@@ -576,13 +644,22 @@ userAuthLoop:
|
||||
if !ok || len(payload) > 0 {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
|
||||
// Ensure the declared public key algo is compatible with the
|
||||
// decoded one. This check will ensure we don't accept e.g.
|
||||
// ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public
|
||||
// key type. The algorithm and public key type must be
|
||||
// consistent: both must be certificate algorithms, or neither.
|
||||
if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
|
||||
authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q",
|
||||
pubKey.Type(), algo)
|
||||
break
|
||||
}
|
||||
// Ensure the public key algo and signature algo
|
||||
// are supported. Compare the private key
|
||||
// algorithm name that corresponds to algo with
|
||||
// sig.Format. This is usually the same, but
|
||||
// for certs, the names differ.
|
||||
if !contains(supportedPubKeyAuthAlgos, sig.Format) {
|
||||
if !contains(config.PublicKeyAuthAlgorithms, sig.Format) {
|
||||
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
|
||||
break
|
||||
}
|
||||
@@ -601,11 +678,11 @@ userAuthLoop:
|
||||
perms = candidate.perms
|
||||
}
|
||||
case "gssapi-with-mic":
|
||||
if config.GSSAPIWithMICConfig == nil {
|
||||
if authConfig.GSSAPIWithMICConfig == nil {
|
||||
authErr = errors.New("ssh: gssapi-with-mic auth not configured")
|
||||
break
|
||||
}
|
||||
gssapiConfig := config.GSSAPIWithMICConfig
|
||||
gssapiConfig := authConfig.GSSAPIWithMICConfig
|
||||
userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload)
|
||||
if err != nil {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
@@ -661,49 +738,70 @@ userAuthLoop:
|
||||
break userAuthLoop
|
||||
}
|
||||
|
||||
authFailures++
|
||||
if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries {
|
||||
// If we have hit the max attempts, don't bother sending the
|
||||
// final SSH_MSG_USERAUTH_FAILURE message, since there are
|
||||
// no more authentication methods which can be attempted,
|
||||
// and this message may cause the client to re-attempt
|
||||
// authentication while we send the disconnect message.
|
||||
// Continue, and trigger the disconnect at the start of
|
||||
// the loop.
|
||||
//
|
||||
// The SSH specification is somewhat confusing about this,
|
||||
// RFC 4252 Section 5.1 requires each authentication failure
|
||||
// be responded to with a respective SSH_MSG_USERAUTH_FAILURE
|
||||
// message, but Section 4 says the server should disconnect
|
||||
// after some number of attempts, but it isn't explicit which
|
||||
// message should take precedence (i.e. should there be a failure
|
||||
// message than a disconnect message, or if we are going to
|
||||
// disconnect, should we only send that message.)
|
||||
//
|
||||
// Either way, OpenSSH disconnects immediately after the last
|
||||
// failed authnetication attempt, and given they are typically
|
||||
// considered the golden implementation it seems reasonable
|
||||
// to match that behavior.
|
||||
continue
|
||||
var failureMsg userAuthFailureMsg
|
||||
|
||||
if partialSuccess, ok := authErr.(*PartialSuccessError); ok {
|
||||
// After a partial success error we don't allow changing the user
|
||||
// name and execute the NoClientAuthCallback.
|
||||
partialSuccessReturned = true
|
||||
|
||||
// In case a partial success is returned, the server may send
|
||||
// a new set of authentication methods.
|
||||
authConfig = partialSuccess.Next
|
||||
|
||||
// Reset pubkey cache, as the new PublicKeyCallback might
|
||||
// accept a different set of public keys.
|
||||
cache = pubKeyCache{}
|
||||
|
||||
// Send back a partial success message to the user.
|
||||
failureMsg.PartialSuccess = true
|
||||
} else {
|
||||
// Allow initial attempt of 'none' without penalty.
|
||||
if authFailures > 0 || userAuthReq.Method != "none" || noneAuthCount != 1 {
|
||||
authFailures++
|
||||
}
|
||||
if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries {
|
||||
// If we have hit the max attempts, don't bother sending the
|
||||
// final SSH_MSG_USERAUTH_FAILURE message, since there are
|
||||
// no more authentication methods which can be attempted,
|
||||
// and this message may cause the client to re-attempt
|
||||
// authentication while we send the disconnect message.
|
||||
// Continue, and trigger the disconnect at the start of
|
||||
// the loop.
|
||||
//
|
||||
// The SSH specification is somewhat confusing about this,
|
||||
// RFC 4252 Section 5.1 requires each authentication failure
|
||||
// be responded to with a respective SSH_MSG_USERAUTH_FAILURE
|
||||
// message, but Section 4 says the server should disconnect
|
||||
// after some number of attempts, but it isn't explicit which
|
||||
// message should take precedence (i.e. should there be a failure
|
||||
// message than a disconnect message, or if we are going to
|
||||
// disconnect, should we only send that message.)
|
||||
//
|
||||
// Either way, OpenSSH disconnects immediately after the last
|
||||
// failed authentication attempt, and given they are typically
|
||||
// considered the golden implementation it seems reasonable
|
||||
// to match that behavior.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var failureMsg userAuthFailureMsg
|
||||
if config.PasswordCallback != nil {
|
||||
if authConfig.PasswordCallback != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "password")
|
||||
}
|
||||
if config.PublicKeyCallback != nil {
|
||||
if authConfig.PublicKeyCallback != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "publickey")
|
||||
}
|
||||
if config.KeyboardInteractiveCallback != nil {
|
||||
if authConfig.KeyboardInteractiveCallback != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive")
|
||||
}
|
||||
if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil &&
|
||||
config.GSSAPIWithMICConfig.AllowLogin != nil {
|
||||
if authConfig.GSSAPIWithMICConfig != nil && authConfig.GSSAPIWithMICConfig.Server != nil &&
|
||||
authConfig.GSSAPIWithMICConfig.AllowLogin != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic")
|
||||
}
|
||||
|
||||
if len(failureMsg.Methods) == 0 {
|
||||
return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
|
||||
return nil, errors.New("ssh: no authentication methods available")
|
||||
}
|
||||
|
||||
if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil {
|
||||
|
||||
35
vendor/golang.org/x/crypto/ssh/tcpip.go
generated
vendored
@@ -5,6 +5,7 @@
package ssh

import (
"context"
"errors"
"fmt"
"io"
@@ -332,6 +333,40 @@ func (l *tcpListener) Addr() net.Addr {
return l.laddr
}

// DialContext initiates a connection to the addr from the remote host.
//
// The provided Context must be non-nil. If the context expires before the
// connection is complete, an error is returned. Once successfully connected,
// any expiration of the context will not affect the connection.
//
// See func Dial for additional information.
func (c *Client) DialContext(ctx context.Context, n, addr string) (net.Conn, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
type connErr struct {
conn net.Conn
err error
}
ch := make(chan connErr)
go func() {
conn, err := c.Dial(n, addr)
select {
case ch <- connErr{conn, err}:
case <-ctx.Done():
if conn != nil {
conn.Close()
}
}
}()
select {
case res := <-ch:
return res.conn, res.err
case <-ctx.Done():
return nil, ctx.Err()
}
}
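A hypothetical usage sketch, not part of the diff: opening a tunnelled TCP connection through an already-established *ssh.Client with a deadline. The client variable and target address are assumptions for illustration, and the usual imports (context, log, time) are omitted here.

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

// DialContext only bounds connection setup; once established, conn
// outlives the context.
conn, err := client.DialContext(ctx, "tcp", "10.0.0.5:5432")
if err != nil {
	log.Fatalf("tunnelled dial failed: %v", err)
}
defer conn.Close()
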

// Dial initiates a connection to the addr from the remote host.
// The resulting connection has a zero LocalAddr() and RemoteAddr().
func (c *Client) Dial(n, addr string) (net.Conn, error) {
32
vendor/golang.org/x/crypto/ssh/transport.go
generated
vendored
@@ -49,6 +49,9 @@ type transport struct {
rand io.Reader
isClient bool
io.Closer

strictMode bool
initialKEXDone bool
}

// packetCipher represents a combination of SSH encryption/MAC
@@ -74,6 +77,18 @@ type connectionState struct {
pendingKeyChange chan packetCipher
}

func (t *transport) setStrictMode() error {
if t.reader.seqNum != 1 {
return errors.New("ssh: sequence number != 1 when strict KEX mode requested")
}
t.strictMode = true
return nil
}

func (t *transport) setInitialKEXDone() {
t.initialKEXDone = true
}

// prepareKeyChange sets up key material for a keychange. The key changes in
// both directions are triggered by reading and writing a msgNewKey packet
// respectively.
@@ -112,11 +127,12 @@ func (t *transport) printPacket(p []byte, write bool) {
// Read and decrypt next packet.
func (t *transport) readPacket() (p []byte, err error) {
for {
p, err = t.reader.readPacket(t.bufReader)
p, err = t.reader.readPacket(t.bufReader, t.strictMode)
if err != nil {
break
}
if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) {
// in strict mode we pass through DEBUG and IGNORE packets only during the initial KEX
if len(p) == 0 || (t.strictMode && !t.initialKEXDone) || (p[0] != msgIgnore && p[0] != msgDebug) {
break
}
}
@@ -127,7 +143,7 @@ func (t *transport) readPacket() (p []byte, err error) {
return p, err
}

func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
func (s *connectionState) readPacket(r *bufio.Reader, strictMode bool) ([]byte, error) {
packet, err := s.packetCipher.readCipherPacket(s.seqNum, r)
s.seqNum++
if err == nil && len(packet) == 0 {
@@ -140,6 +156,9 @@ func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
select {
case cipher := <-s.pendingKeyChange:
s.packetCipher = cipher
if strictMode {
s.seqNum = 0
}
default:
return nil, errors.New("ssh: got bogus newkeys message")
}
@@ -170,10 +189,10 @@ func (t *transport) writePacket(packet []byte) error {
if debugTransport {
t.printPacket(packet, true)
}
return t.writer.writePacket(t.bufWriter, t.rand, packet)
return t.writer.writePacket(t.bufWriter, t.rand, packet, t.strictMode)
}

func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte, strictMode bool) error {
changeKeys := len(packet) > 0 && packet[0] == msgNewKeys

err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet)
@@ -188,6 +207,9 @@ func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []
select {
case cipher := <-s.pendingKeyChange:
s.packetCipher = cipher
if strictMode {
s.seqNum = 0
}
default:
panic("ssh: no key material for msgNewKeys")
}
0
vendor/golang.org/x/mod/LICENSE → vendor/golang.org/x/exp/LICENSE
generated
vendored
0
vendor/golang.org/x/mod/PATENTS → vendor/golang.org/x/exp/PATENTS
generated
vendored
50
vendor/golang.org/x/exp/constraints/constraints.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package constraints defines a set of useful constraints to be used
|
||||
// with type parameters.
|
||||
package constraints
|
||||
|
||||
// Signed is a constraint that permits any signed integer type.
|
||||
// If future releases of Go add new predeclared signed integer types,
|
||||
// this constraint will be modified to include them.
|
||||
type Signed interface {
|
||||
~int | ~int8 | ~int16 | ~int32 | ~int64
|
||||
}
|
||||
|
||||
// Unsigned is a constraint that permits any unsigned integer type.
|
||||
// If future releases of Go add new predeclared unsigned integer types,
|
||||
// this constraint will be modified to include them.
|
||||
type Unsigned interface {
|
||||
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
|
||||
}
|
||||
|
||||
// Integer is a constraint that permits any integer type.
|
||||
// If future releases of Go add new predeclared integer types,
|
||||
// this constraint will be modified to include them.
|
||||
type Integer interface {
|
||||
Signed | Unsigned
|
||||
}
|
||||
|
||||
// Float is a constraint that permits any floating-point type.
|
||||
// If future releases of Go add new predeclared floating-point types,
|
||||
// this constraint will be modified to include them.
|
||||
type Float interface {
|
||||
~float32 | ~float64
|
||||
}
|
||||
|
||||
// Complex is a constraint that permits any complex numeric type.
|
||||
// If future releases of Go add new predeclared complex numeric types,
|
||||
// this constraint will be modified to include them.
|
||||
type Complex interface {
|
||||
~complex64 | ~complex128
|
||||
}
|
||||
|
||||
// Ordered is a constraint that permits any ordered type: any type
|
||||
// that supports the operators < <= >= >.
|
||||
// If future releases of Go add new ordered types,
|
||||
// this constraint will be modified to include them.
|
||||
type Ordered interface {
|
||||
Integer | Float | ~string
|
||||
}
|
||||
44
vendor/golang.org/x/exp/slices/cmp.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slices
|
||||
|
||||
import "golang.org/x/exp/constraints"
|
||||
|
||||
// min is a version of the predeclared function from the Go 1.21 release.
|
||||
func min[T constraints.Ordered](a, b T) T {
|
||||
if a < b || isNaN(a) {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// max is a version of the predeclared function from the Go 1.21 release.
|
||||
func max[T constraints.Ordered](a, b T) T {
|
||||
if a > b || isNaN(a) {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// cmpLess is a copy of cmp.Less from the Go 1.21 release.
|
||||
func cmpLess[T constraints.Ordered](x, y T) bool {
|
||||
return (isNaN(x) && !isNaN(y)) || x < y
|
||||
}
|
||||
|
||||
// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
|
||||
func cmpCompare[T constraints.Ordered](x, y T) int {
|
||||
xNaN := isNaN(x)
|
||||
yNaN := isNaN(y)
|
||||
if xNaN && yNaN {
|
||||
return 0
|
||||
}
|
||||
if xNaN || x < y {
|
||||
return -1
|
||||
}
|
||||
if yNaN || x > y {
|
||||
return +1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
515
vendor/golang.org/x/exp/slices/slices.go
generated
vendored
Normal file
@@ -0,0 +1,515 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package slices defines various functions useful with slices of any type.
|
||||
package slices
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
// Equal reports whether two slices are equal: the same length and all
|
||||
// elements equal. If the lengths are different, Equal returns false.
|
||||
// Otherwise, the elements are compared in increasing index order, and the
|
||||
// comparison stops at the first unequal pair.
|
||||
// Floating point NaNs are not considered equal.
|
||||
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
|
||||
if len(s1) != len(s2) {
|
||||
return false
|
||||
}
|
||||
for i := range s1 {
|
||||
if s1[i] != s2[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EqualFunc reports whether two slices are equal using an equality
|
||||
// function on each pair of elements. If the lengths are different,
|
||||
// EqualFunc returns false. Otherwise, the elements are compared in
|
||||
// increasing index order, and the comparison stops at the first index
|
||||
// for which eq returns false.
|
||||
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
|
||||
if len(s1) != len(s2) {
|
||||
return false
|
||||
}
|
||||
for i, v1 := range s1 {
|
||||
v2 := s2[i]
|
||||
if !eq(v1, v2) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
|
||||
// of elements. The elements are compared sequentially, starting at index 0,
|
||||
// until one element is not equal to the other.
|
||||
// The result of comparing the first non-matching elements is returned.
|
||||
// If both slices are equal until one of them ends, the shorter slice is
|
||||
// considered less than the longer one.
|
||||
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
|
||||
func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
|
||||
for i, v1 := range s1 {
|
||||
if i >= len(s2) {
|
||||
return +1
|
||||
}
|
||||
v2 := s2[i]
|
||||
if c := cmpCompare(v1, v2); c != 0 {
|
||||
return c
|
||||
}
|
||||
}
|
||||
if len(s1) < len(s2) {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// CompareFunc is like [Compare] but uses a custom comparison function on each
|
||||
// pair of elements.
|
||||
// The result is the first non-zero result of cmp; if cmp always
|
||||
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
|
||||
// and +1 if len(s1) > len(s2).
|
||||
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
|
||||
for i, v1 := range s1 {
|
||||
if i >= len(s2) {
|
||||
return +1
|
||||
}
|
||||
v2 := s2[i]
|
||||
if c := cmp(v1, v2); c != 0 {
|
||||
return c
|
||||
}
|
||||
}
|
||||
if len(s1) < len(s2) {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Index returns the index of the first occurrence of v in s,
|
||||
// or -1 if not present.
|
||||
func Index[S ~[]E, E comparable](s S, v E) int {
|
||||
for i := range s {
|
||||
if v == s[i] {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// IndexFunc returns the first index i satisfying f(s[i]),
|
||||
// or -1 if none do.
|
||||
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
|
||||
for i := range s {
|
||||
if f(s[i]) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// Contains reports whether v is present in s.
|
||||
func Contains[S ~[]E, E comparable](s S, v E) bool {
|
||||
return Index(s, v) >= 0
|
||||
}
|
||||
|
||||
// ContainsFunc reports whether at least one
|
||||
// element e of s satisfies f(e).
|
||||
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
|
||||
return IndexFunc(s, f) >= 0
|
||||
}
|
||||
|
||||
// Insert inserts the values v... into s at index i,
|
||||
// returning the modified slice.
|
||||
// The elements at s[i:] are shifted up to make room.
|
||||
// In the returned slice r, r[i] == v[0],
|
||||
// and r[i+len(v)] == value originally at r[i].
|
||||
// Insert panics if i is out of range.
|
||||
// This function is O(len(s) + len(v)).
|
||||
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
|
||||
m := len(v)
|
||||
if m == 0 {
|
||||
return s
|
||||
}
|
||||
n := len(s)
|
||||
if i == n {
|
||||
return append(s, v...)
|
||||
}
|
||||
if n+m > cap(s) {
|
||||
// Use append rather than make so that we bump the size of
|
||||
// the slice up to the next storage class.
|
||||
// This is what Grow does but we don't call Grow because
|
||||
// that might copy the values twice.
|
||||
s2 := append(s[:i], make(S, n+m-i)...)
|
||||
copy(s2[i:], v)
|
||||
copy(s2[i+m:], s[i:])
|
||||
return s2
|
||||
}
|
||||
s = s[:n+m]
|
||||
|
||||
// before:
|
||||
// s: aaaaaaaabbbbccccccccdddd
|
||||
// ^ ^ ^ ^
|
||||
// i i+m n n+m
|
||||
// after:
|
||||
// s: aaaaaaaavvvvbbbbcccccccc
|
||||
// ^ ^ ^ ^
|
||||
// i i+m n n+m
|
||||
//
|
||||
// a are the values that don't move in s.
|
||||
// v are the values copied in from v.
|
||||
// b and c are the values from s that are shifted up in index.
|
||||
// d are the values that get overwritten, never to be seen again.
|
||||
|
||||
if !overlaps(v, s[i+m:]) {
|
||||
// Easy case - v does not overlap either the c or d regions.
|
||||
// (It might be in some of a or b, or elsewhere entirely.)
|
||||
// The data we copy up doesn't write to v at all, so just do it.
|
||||
|
||||
copy(s[i+m:], s[i:])
|
||||
|
||||
// Now we have
|
||||
// s: aaaaaaaabbbbbbbbcccccccc
|
||||
// ^ ^ ^ ^
|
||||
// i i+m n n+m
|
||||
// Note the b values are duplicated.
|
||||
|
||||
copy(s[i:], v)
|
||||
|
||||
// Now we have
|
||||
// s: aaaaaaaavvvvbbbbcccccccc
|
||||
// ^ ^ ^ ^
|
||||
// i i+m n n+m
|
||||
// That's the result we want.
|
||||
return s
|
||||
}
|
||||
|
||||
// The hard case - v overlaps c or d. We can't just shift up
|
||||
// the data because we'd move or clobber the values we're trying
|
||||
// to insert.
|
||||
// So instead, write v on top of d, then rotate.
|
||||
copy(s[n:], v)
|
||||
|
||||
// Now we have
|
||||
// s: aaaaaaaabbbbccccccccvvvv
|
||||
// ^ ^ ^ ^
|
||||
// i i+m n n+m
|
||||
|
||||
rotateRight(s[i:], m)
|
||||
|
||||
// Now we have
|
||||
// s: aaaaaaaavvvvbbbbcccccccc
|
||||
// ^ ^ ^ ^
|
||||
// i i+m n n+m
|
||||
// That's the result we want.
|
||||
return s
|
||||
}

// clearSlice sets all elements up to the length of s to the zero value of E.
// We may use the builtin clear func instead, and remove clearSlice, when upgrading
// to Go 1.21+.
func clearSlice[S ~[]E, E any](s S) {
	var zero E
	for i := range s {
		s[i] = zero
	}
}

// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if j > len(s) or s[i:j] is not a valid slice of s.
// Delete is O(len(s)-i), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete zeroes the elements s[len(s)-(j-i):len(s)].
func Delete[S ~[]E, E any](s S, i, j int) S {
	_ = s[i:j:len(s)] // bounds check

	if i == j {
		return s
	}

	oldlen := len(s)
	s = append(s[:i], s[j:]...)
	clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
	return s
}

// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// DeleteFunc zeroes the elements between the new length and the original length.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
	i := IndexFunc(s, del)
	if i == -1 {
		return s
	}
	// Don't start copying elements until we find one to delete.
	for j := i + 1; j < len(s); j++ {
		if v := s[j]; !del(v) {
			s[i] = v
			i++
		}
	}
	clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
	return s[:i]
}

// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
	_ = s[i:j] // verify that i:j is a valid subslice

	if i == j {
		return Insert(s, i, v...)
	}
	if j == len(s) {
		return append(s[:i], v...)
	}

	tot := len(s[:i]) + len(v) + len(s[j:])
	if tot > cap(s) {
		// Too big to fit, allocate and copy over.
		s2 := append(s[:i], make(S, tot-i)...) // See Insert
		copy(s2[i:], v)
		copy(s2[i+len(v):], s[j:])
		return s2
	}

	r := s[:tot]

	if i+len(v) <= j {
		// Easy, as v fits in the deleted portion.
		copy(r[i:], v)
		if i+len(v) != j {
			copy(r[i+len(v):], s[j:])
		}
		clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
		return r
	}

	// We are expanding (v is bigger than j-i).
	// The situation is something like this:
	// (example has i=4,j=8,len(s)=16,len(v)=6)
	// s: aaaaxxxxbbbbbbbbyy
	//        ^   ^       ^ ^
	//        i   j  len(s) tot
	// a: prefix of s
	// x: deleted range
	// b: more of s
	// y: area to expand into

	if !overlaps(r[i+len(v):], v) {
		// Easy, as v is not clobbered by the first copy.
		copy(r[i+len(v):], s[j:])
		copy(r[i:], v)
		return r
	}

	// This is a situation where we don't have a single place to which
	// we can copy v. Parts of it need to go to two different places.
	// We want to copy the prefix of v into y and the suffix into x, then
	// rotate |y| spots to the right.
	//
	//        v[2:]      v[:2]
	//          |           |
	// s: aaaavvvvbbbbbbbbvv
	//        ^   ^       ^ ^
	//        i   j  len(s) tot
	//
	// If either of those two destinations don't alias v, then we're good.
	y := len(v) - (j - i) // length of y portion

	if !overlaps(r[i:j], v) {
		copy(r[i:j], v[y:])
		copy(r[len(s):], v[:y])
		rotateRight(r[i:], y)
		return r
	}
	if !overlaps(r[len(s):], v) {
		copy(r[len(s):], v[:y])
		copy(r[i:j], v[y:])
		rotateRight(r[i:], y)
		return r
	}

	// Now we know that v overlaps both x and y.
	// That means that the entirety of b is *inside* v.
	// So we don't need to preserve b at all; instead we
	// can copy v first, then copy the b part of v out of
	// v to the right destination.
	k := startIdx(v, s[j:])
	copy(r[i:], v)
	copy(r[i+len(v):], r[i+k:])
	return r
}

// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
func Clone[S ~[]E, E any](s S) S {
	// Preserve nil in case it matters.
	if s == nil {
		return nil
	}
	return append(S([]E{}), s...)
}

// Compact replaces consecutive runs of equal elements with a single copy.
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
// Compact zeroes the elements between the new length and the original length.
func Compact[S ~[]E, E comparable](s S) S {
	if len(s) < 2 {
		return s
	}
	i := 1
	for k := 1; k < len(s); k++ {
		if s[k] != s[k-1] {
			if i != k {
				s[i] = s[k]
			}
			i++
		}
	}
	clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
	return s[:i]
}
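
// Illustrative note (editorial, not part of the upstream file): for example,
// Compact([]int{1, 1, 2, 3, 3}) is expected to return [1 2 3], with the two
// now-unused trailing positions of the original backing array zeroed.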

// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
// CompactFunc zeroes the elements between the new length and the original length.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
	if len(s) < 2 {
		return s
	}
	i := 1
	for k := 1; k < len(s); k++ {
		if !eq(s[k], s[k-1]) {
			if i != k {
				s[i] = s[k]
			}
			i++
		}
	}
	clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
	return s[:i]
}

// Grow increases the slice's capacity, if necessary, to guarantee space for
// another n elements. After Grow(n), at least n elements can be appended
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
func Grow[S ~[]E, E any](s S, n int) S {
	if n < 0 {
		panic("cannot be negative")
	}
	if n -= cap(s) - len(s); n > 0 {
		// TODO(https://go.dev/issue/53888): Make using []E instead of S
		// to workaround a compiler bug where the runtime.growslice optimization
		// does not take effect. Revert when the compiler is fixed.
		s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
	}
	return s
}
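
// Illustrative note (editorial, not part of the upstream file): for example,
// s := Grow([]int{1, 2}, 8) should leave len(s) == 2 and cap(s) >= 10, so the
// next 8 appends will not allocate.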

// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
func Clip[S ~[]E, E any](s S) S {
	return s[:len(s):len(s)]
}

// Rotation algorithm explanation:
//
// rotate left by 2
// start with
//   0123456789
// split up like this
//   01 234567 89
// swap first 2 and last 2
//   89 234567 01
// join first parts
//   89234567 01
// recursively rotate first left part by 2
//   23456789 01
// join at the end
//   2345678901
//
// rotate left by 8
// start with
//   0123456789
// split up like this
//   01 234567 89
// swap first 2 and last 2
//   89 234567 01
// join last parts
//   89 23456701
// recursively rotate second part left by 6
//   89 01234567
// join at the end
//   8901234567

// TODO: There are other rotate algorithms.
// This algorithm has the desirable property that it moves each element exactly twice.
// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
// The follow-cycles algorithm can be 1-write but it is not very cache friendly.

// rotateLeft rotates s left by r spaces.
// s_final[i] = s_orig[i+r], wrapping around.
func rotateLeft[E any](s []E, r int) {
	for r != 0 && r != len(s) {
		if r*2 <= len(s) {
			swap(s[:r], s[len(s)-r:])
			s = s[:len(s)-r]
		} else {
			swap(s[:len(s)-r], s[r:])
			s, r = s[len(s)-r:], r*2-len(s)
		}
	}
}
func rotateRight[E any](s []E, r int) {
	rotateLeft(s, len(s)-r)
}
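
// Illustrative note (editorial, not part of the upstream file): with the
// algorithm above, rotateLeft([]int{0, 1, 2, 3, 4}, 2) rearranges the slice in
// place to [2 3 4 0 1], while rotateRight on the same input with r = 2 yields
// [3 4 0 1 2].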

// swap swaps the contents of x and y. x and y must be equal length and disjoint.
func swap[E any](x, y []E) {
	for i := 0; i < len(x); i++ {
		x[i], y[i] = y[i], x[i]
	}
}

// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
func overlaps[E any](a, b []E) bool {
	if len(a) == 0 || len(b) == 0 {
		return false
	}
	elemSize := unsafe.Sizeof(a[0])
	if elemSize == 0 {
		return false
	}
	// TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
	// Also see crypto/internal/alias/alias.go:AnyOverlap
	return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
		uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
}

// startIdx returns the index in haystack where the needle starts.
// prerequisite: the needle must be aliased entirely inside the haystack.
func startIdx[E any](haystack, needle []E) int {
	p := &needle[0]
	for i := range haystack {
		if p == &haystack[i] {
			return i
		}
	}
	// TODO: what if the overlap is by a non-integral number of Es?
	panic("needle not found")
}

// Reverse reverses the elements of the slice in place.
func Reverse[S ~[]E, E any](s S) {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}
195
vendor/golang.org/x/exp/slices/sort.go
generated
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
|
||||
|
||||
package slices
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
// Sort sorts a slice of any ordered type in ascending order.
|
||||
// When sorting floating-point numbers, NaNs are ordered before other values.
|
||||
func Sort[S ~[]E, E constraints.Ordered](x S) {
|
||||
n := len(x)
|
||||
pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
|
||||
}
|
||||
|
||||
// SortFunc sorts the slice x in ascending order as determined by the cmp
|
||||
// function. This sort is not guaranteed to be stable.
|
||||
// cmp(a, b) should return a negative number when a < b, a positive number when
|
||||
// a > b and zero when a == b.
|
||||
//
|
||||
// SortFunc requires that cmp is a strict weak ordering.
|
||||
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
|
||||
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
|
||||
n := len(x)
|
||||
pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
|
||||
}
|
||||
|
||||
// SortStableFunc sorts the slice x while keeping the original order of equal
|
||||
// elements, using cmp to compare elements in the same way as [SortFunc].
|
||||
func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
|
||||
stableCmpFunc(x, len(x), cmp)
|
||||
}
|
||||
|
||||
// IsSorted reports whether x is sorted in ascending order.
|
||||
func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
|
||||
for i := len(x) - 1; i > 0; i-- {
|
||||
if cmpLess(x[i], x[i-1]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
|
||||
// comparison function as defined by [SortFunc].
|
||||
func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
|
||||
for i := len(x) - 1; i > 0; i-- {
|
||||
if cmp(x[i], x[i-1]) < 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Min returns the minimal value in x. It panics if x is empty.
|
||||
// For floating-point numbers, Min propagates NaNs (any NaN value in x
|
||||
// forces the output to be NaN).
|
||||
func Min[S ~[]E, E constraints.Ordered](x S) E {
|
||||
if len(x) < 1 {
|
||||
panic("slices.Min: empty list")
|
||||
}
|
||||
m := x[0]
|
||||
for i := 1; i < len(x); i++ {
|
||||
m = min(m, x[i])
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// MinFunc returns the minimal value in x, using cmp to compare elements.
|
||||
// It panics if x is empty. If there is more than one minimal element
|
||||
// according to the cmp function, MinFunc returns the first one.
|
||||
func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
|
||||
if len(x) < 1 {
|
||||
panic("slices.MinFunc: empty list")
|
||||
}
|
||||
m := x[0]
|
||||
for i := 1; i < len(x); i++ {
|
||||
if cmp(x[i], m) < 0 {
|
||||
m = x[i]
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Max returns the maximal value in x. It panics if x is empty.
|
||||
// For floating-point E, Max propagates NaNs (any NaN value in x
|
||||
// forces the output to be NaN).
|
||||
func Max[S ~[]E, E constraints.Ordered](x S) E {
|
||||
if len(x) < 1 {
|
||||
panic("slices.Max: empty list")
|
||||
}
|
||||
m := x[0]
|
||||
for i := 1; i < len(x); i++ {
|
||||
m = max(m, x[i])
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// MaxFunc returns the maximal value in x, using cmp to compare elements.
|
||||
// It panics if x is empty. If there is more than one maximal element
|
||||
// according to the cmp function, MaxFunc returns the first one.
|
||||
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
|
||||
if len(x) < 1 {
|
||||
panic("slices.MaxFunc: empty list")
|
||||
}
|
||||
m := x[0]
|
||||
for i := 1; i < len(x); i++ {
|
||||
if cmp(x[i], m) > 0 {
|
||||
m = x[i]
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// BinarySearch searches for target in a sorted slice and returns the position
|
||||
// where target is found, or the position where target would appear in the
|
||||
// sort order; it also returns a bool saying whether the target is really found
|
||||
// in the slice. The slice must be sorted in increasing order.
|
||||
func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
|
||||
// Inlining is faster than calling BinarySearchFunc with a lambda.
|
||||
n := len(x)
|
||||
// Define x[-1] < target and x[n] >= target.
|
||||
// Invariant: x[i-1] < target, x[j] >= target.
|
||||
i, j := 0, n
|
||||
for i < j {
|
||||
h := int(uint(i+j) >> 1) // avoid overflow when computing h
|
||||
// i ≤ h < j
|
||||
if cmpLess(x[h], target) {
|
||||
i = h + 1 // preserves x[i-1] < target
|
||||
} else {
|
||||
j = h // preserves x[j] >= target
|
||||
}
|
||||
}
|
||||
// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
|
||||
return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
|
||||
}
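
// Illustrative note (editorial, not part of the upstream file): on the sorted
// slice x := []int{1, 3, 5, 5, 8}, BinarySearch(x, 5) is expected to return
// (2, true), the first index holding 5, while BinarySearch(x, 4) returns
// (2, false), the index at which 4 would be inserted.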
|
||||
|
||||
// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
|
||||
// function. The slice must be sorted in increasing order, where "increasing"
|
||||
// is defined by cmp. cmp should return 0 if the slice element matches
|
||||
// the target, a negative number if the slice element precedes the target,
|
||||
// or a positive number if the slice element follows the target.
|
||||
// cmp must implement the same ordering as the slice, such that if
|
||||
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
|
||||
func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
|
||||
n := len(x)
|
||||
// Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
|
||||
// Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
|
||||
i, j := 0, n
|
||||
for i < j {
|
||||
h := int(uint(i+j) >> 1) // avoid overflow when computing h
|
||||
// i ≤ h < j
|
||||
if cmp(x[h], target) < 0 {
|
||||
i = h + 1 // preserves cmp(x[i - 1], target) < 0
|
||||
} else {
|
||||
j = h // preserves cmp(x[j], target) >= 0
|
||||
}
|
||||
}
|
||||
// i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
|
||||
return i, i < n && cmp(x[i], target) == 0
|
||||
}
|
||||
|
||||
type sortedHint int // hint for pdqsort when choosing the pivot
|
||||
|
||||
const (
|
||||
unknownHint sortedHint = iota
|
||||
increasingHint
|
||||
decreasingHint
|
||||
)
|
||||
|
||||
// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
|
||||
type xorshift uint64
|
||||
|
||||
func (r *xorshift) Next() uint64 {
|
||||
*r ^= *r << 13
|
||||
*r ^= *r >> 17
|
||||
*r ^= *r << 5
|
||||
return uint64(*r)
|
||||
}
|
||||
|
||||
func nextPowerOfTwo(length int) uint {
|
||||
return 1 << bits.Len(uint(length))
|
||||
}
|
||||
|
||||
// isNaN reports whether x is a NaN without requiring the math package.
|
||||
// This will always return false if T is not floating-point.
|
||||
func isNaN[T constraints.Ordered](x T) bool {
|
||||
return x != x
|
||||
}
|
||||
479
vendor/golang.org/x/exp/slices/zsortanyfunc.go
generated
vendored
Normal file
@@ -0,0 +1,479 @@
|
||||
// Code generated by gen_sort_variants.go; DO NOT EDIT.
|
||||
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slices
|
||||
|
||||
// insertionSortCmpFunc sorts data[a:b] using insertion sort.
|
||||
func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
|
||||
for i := a + 1; i < b; i++ {
|
||||
for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
|
||||
data[j], data[j-1] = data[j-1], data[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// siftDownCmpFunc implements the heap property on data[lo:hi].
|
||||
// first is an offset into the array where the root of the heap lies.
|
||||
func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
|
||||
root := lo
|
||||
for {
|
||||
child := 2*root + 1
|
||||
if child >= hi {
|
||||
break
|
||||
}
|
||||
if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
|
||||
child++
|
||||
}
|
||||
if !(cmp(data[first+root], data[first+child]) < 0) {
|
||||
return
|
||||
}
|
||||
data[first+root], data[first+child] = data[first+child], data[first+root]
|
||||
root = child
|
||||
}
|
||||
}
|
||||
|
||||
func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
|
||||
first := a
|
||||
lo := 0
|
||||
hi := b - a
|
||||
|
||||
// Build heap with greatest element at top.
|
||||
for i := (hi - 1) / 2; i >= 0; i-- {
|
||||
siftDownCmpFunc(data, i, hi, first, cmp)
|
||||
}
|
||||
|
||||
// Pop elements, largest first, into end of data.
|
||||
for i := hi - 1; i >= 0; i-- {
|
||||
data[first], data[first+i] = data[first+i], data[first]
|
||||
siftDownCmpFunc(data, lo, i, first, cmp)
|
||||
}
|
||||
}
|
||||
|
||||
// pdqsortCmpFunc sorts data[a:b].
|
||||
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
|
||||
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
|
||||
// C++ implementation: https://github.com/orlp/pdqsort
|
||||
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
|
||||
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
|
||||
func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
|
||||
const maxInsertion = 12
|
||||
|
||||
var (
|
||||
wasBalanced = true // whether the last partitioning was reasonably balanced
|
||||
wasPartitioned = true // whether the slice was already partitioned
|
||||
)
|
||||
|
||||
for {
|
||||
length := b - a
|
||||
|
||||
if length <= maxInsertion {
|
||||
insertionSortCmpFunc(data, a, b, cmp)
|
||||
return
|
||||
}
|
||||
|
||||
// Fall back to heapsort if too many bad choices were made.
|
||||
if limit == 0 {
|
||||
heapSortCmpFunc(data, a, b, cmp)
|
||||
return
|
||||
}
|
||||
|
||||
// If the last partitioning was imbalanced, we need to break patterns.
|
||||
if !wasBalanced {
|
||||
breakPatternsCmpFunc(data, a, b, cmp)
|
||||
limit--
|
||||
}
|
||||
|
||||
pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
|
||||
if hint == decreasingHint {
|
||||
reverseRangeCmpFunc(data, a, b, cmp)
|
||||
// The chosen pivot was pivot-a elements after the start of the array.
|
||||
// After reversing it is pivot-a elements before the end of the array.
|
||||
// The idea came from Rust's implementation.
|
||||
pivot = (b - 1) - (pivot - a)
|
||||
hint = increasingHint
|
||||
}
|
||||
|
||||
// The slice is likely already sorted.
|
||||
if wasBalanced && wasPartitioned && hint == increasingHint {
|
||||
if partialInsertionSortCmpFunc(data, a, b, cmp) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Probably the slice contains many duplicate elements, partition the slice into
|
||||
// elements equal to and elements greater than the pivot.
|
||||
if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
|
||||
mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
|
||||
a = mid
|
||||
continue
|
||||
}
|
||||
|
||||
mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
|
||||
wasPartitioned = alreadyPartitioned
|
||||
|
||||
leftLen, rightLen := mid-a, b-mid
|
||||
balanceThreshold := length / 8
|
||||
if leftLen < rightLen {
|
||||
wasBalanced = leftLen >= balanceThreshold
|
||||
pdqsortCmpFunc(data, a, mid, limit, cmp)
|
||||
a = mid + 1
|
||||
} else {
|
||||
wasBalanced = rightLen >= balanceThreshold
|
||||
pdqsortCmpFunc(data, mid+1, b, limit, cmp)
|
||||
b = mid
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// partitionCmpFunc does one quicksort partition.
|
||||
// Let p = data[pivot]
|
||||
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
|
||||
// On return, data[newpivot] = p
|
||||
func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
|
||||
data[a], data[pivot] = data[pivot], data[a]
|
||||
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
|
||||
|
||||
for i <= j && (cmp(data[i], data[a]) < 0) {
|
||||
i++
|
||||
}
|
||||
for i <= j && !(cmp(data[j], data[a]) < 0) {
|
||||
j--
|
||||
}
|
||||
if i > j {
|
||||
data[j], data[a] = data[a], data[j]
|
||||
return j, true
|
||||
}
|
||||
data[i], data[j] = data[j], data[i]
|
||||
i++
|
||||
j--
|
||||
|
||||
for {
|
||||
for i <= j && (cmp(data[i], data[a]) < 0) {
|
||||
i++
|
||||
}
|
||||
for i <= j && !(cmp(data[j], data[a]) < 0) {
|
||||
j--
|
||||
}
|
||||
if i > j {
|
||||
break
|
||||
}
|
||||
data[i], data[j] = data[j], data[i]
|
||||
i++
|
||||
j--
|
||||
}
|
||||
data[j], data[a] = data[a], data[j]
|
||||
return j, false
|
||||
}
|
||||
|
||||
// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
|
||||
// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
|
||||
func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
|
||||
data[a], data[pivot] = data[pivot], data[a]
|
||||
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
|
||||
|
||||
for {
|
||||
for i <= j && !(cmp(data[a], data[i]) < 0) {
|
||||
i++
|
||||
}
|
||||
for i <= j && (cmp(data[a], data[j]) < 0) {
|
||||
j--
|
||||
}
|
||||
if i > j {
|
||||
break
|
||||
}
|
||||
data[i], data[j] = data[j], data[i]
|
||||
i++
|
||||
j--
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
|
||||
func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
|
||||
const (
|
||||
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
|
||||
shortestShifting = 50 // don't shift any elements on short arrays
|
||||
)
|
||||
i := a + 1
|
||||
for j := 0; j < maxSteps; j++ {
|
||||
for i < b && !(cmp(data[i], data[i-1]) < 0) {
|
||||
i++
|
||||
}
|
||||
|
||||
if i == b {
|
||||
return true
|
||||
}
|
||||
|
||||
if b-a < shortestShifting {
|
||||
return false
|
||||
}
|
||||
|
||||
data[i], data[i-1] = data[i-1], data[i]
|
||||
|
||||
// Shift the smaller one to the left.
|
||||
if i-a >= 2 {
|
||||
for j := i - 1; j >= 1; j-- {
|
||||
if !(cmp(data[j], data[j-1]) < 0) {
|
||||
break
|
||||
}
|
||||
data[j], data[j-1] = data[j-1], data[j]
|
||||
}
|
||||
}
|
||||
// Shift the greater one to the right.
|
||||
if b-i >= 2 {
|
||||
for j := i + 1; j < b; j++ {
|
||||
if !(cmp(data[j], data[j-1]) < 0) {
|
||||
break
|
||||
}
|
||||
data[j], data[j-1] = data[j-1], data[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
|
||||
// that might cause imbalanced partitions in quicksort.
|
||||
func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
|
||||
length := b - a
|
||||
if length >= 8 {
|
||||
random := xorshift(length)
|
||||
modulus := nextPowerOfTwo(length)
|
||||
|
||||
for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
|
||||
other := int(uint(random.Next()) & (modulus - 1))
|
||||
if other >= length {
|
||||
other -= length
|
||||
}
|
||||
data[idx], data[a+other] = data[a+other], data[idx]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// choosePivotCmpFunc chooses a pivot in data[a:b].
|
||||
//
|
||||
// [0,8): chooses a static pivot.
|
||||
// [8,shortestNinther): uses the simple median-of-three method.
|
||||
// [shortestNinther,∞): uses the Tukey ninther method.
|
||||
func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
|
||||
const (
|
||||
shortestNinther = 50
|
||||
maxSwaps = 4 * 3
|
||||
)
|
||||
|
||||
l := b - a
|
||||
|
||||
var (
|
||||
swaps int
|
||||
i = a + l/4*1
|
||||
j = a + l/4*2
|
||||
k = a + l/4*3
|
||||
)
|
||||
|
||||
if l >= 8 {
|
||||
if l >= shortestNinther {
|
||||
// Tukey ninther method, the idea came from Rust's implementation.
|
||||
i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
|
||||
j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
|
||||
k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
|
||||
}
|
||||
// Find the median among i, j, k and store it into j.
|
||||
j = medianCmpFunc(data, i, j, k, &swaps, cmp)
|
||||
}
|
||||
|
||||
switch swaps {
|
||||
case 0:
|
||||
return j, increasingHint
|
||||
case maxSwaps:
|
||||
return j, decreasingHint
|
||||
default:
|
||||
return j, unknownHint
|
||||
}
|
||||
}
|
||||
|
||||
// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
|
||||
func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
|
||||
if cmp(data[b], data[a]) < 0 {
|
||||
*swaps++
|
||||
return b, a
|
||||
}
|
||||
return a, b
|
||||
}
|
||||
|
||||
// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
|
||||
func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
|
||||
a, b = order2CmpFunc(data, a, b, swaps, cmp)
|
||||
b, c = order2CmpFunc(data, b, c, swaps, cmp)
|
||||
a, b = order2CmpFunc(data, a, b, swaps, cmp)
|
||||
return b
|
||||
}
|
||||
|
||||
// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
|
||||
func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
|
||||
return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
|
||||
}
|
||||
|
||||
func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
|
||||
i := a
|
||||
j := b - 1
|
||||
for i < j {
|
||||
data[i], data[j] = data[j], data[i]
|
||||
i++
|
||||
j--
|
||||
}
|
||||
}
|
||||
|
||||
func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
|
||||
for i := 0; i < n; i++ {
|
||||
data[a+i], data[b+i] = data[b+i], data[a+i]
|
||||
}
|
||||
}
|
||||
|
||||
func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
|
||||
blockSize := 20 // must be > 0
|
||||
a, b := 0, blockSize
|
||||
for b <= n {
|
||||
insertionSortCmpFunc(data, a, b, cmp)
|
||||
a = b
|
||||
b += blockSize
|
||||
}
|
||||
insertionSortCmpFunc(data, a, n, cmp)
|
||||
|
||||
for blockSize < n {
|
||||
a, b = 0, 2*blockSize
|
||||
for b <= n {
|
||||
symMergeCmpFunc(data, a, a+blockSize, b, cmp)
|
||||
a = b
|
||||
b += 2 * blockSize
|
||||
}
|
||||
if m := a + blockSize; m < n {
|
||||
symMergeCmpFunc(data, a, m, n, cmp)
|
||||
}
|
||||
blockSize *= 2
|
||||
}
|
||||
}
|
||||
|
||||
// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
|
||||
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
|
||||
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
|
||||
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
|
||||
// Computer Science, pages 714-723. Springer, 2004.
|
||||
//
|
||||
// Let M = m-a and N = b-m. Wolog M < N.
|
||||
// The recursion depth is bound by ceil(log(N+M)).
|
||||
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
|
||||
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
|
||||
//
|
||||
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
|
||||
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
|
||||
// in the paper carries through for Swap operations, especially as the block
|
||||
// swapping rotate uses only O(M+N) Swaps.
|
||||
//
|
||||
// symMerge assumes non-degenerate arguments: a < m && m < b.
|
||||
// Having the caller check this condition eliminates many leaf recursion calls,
|
||||
// which improves performance.
|
||||
func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
|
||||
// Avoid unnecessary recursions of symMerge
|
||||
// by direct insertion of data[a] into data[m:b]
|
||||
// if data[a:m] only contains one element.
|
||||
if m-a == 1 {
|
||||
// Use binary search to find the lowest index i
|
||||
// such that data[i] >= data[a] for m <= i < b.
|
||||
// Exit the search loop with i == b in case no such index exists.
|
||||
i := m
|
||||
j := b
|
||||
for i < j {
|
||||
h := int(uint(i+j) >> 1)
|
||||
if cmp(data[h], data[a]) < 0 {
|
||||
i = h + 1
|
||||
} else {
|
||||
j = h
|
||||
}
|
||||
}
|
||||
// Swap values until data[a] reaches the position before i.
|
||||
for k := a; k < i-1; k++ {
|
||||
data[k], data[k+1] = data[k+1], data[k]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Avoid unnecessary recursions of symMerge
|
||||
// by direct insertion of data[m] into data[a:m]
|
||||
// if data[m:b] only contains one element.
|
||||
if b-m == 1 {
|
||||
// Use binary search to find the lowest index i
|
||||
// such that data[i] > data[m] for a <= i < m.
|
||||
// Exit the search loop with i == m in case no such index exists.
|
||||
i := a
|
||||
j := m
|
||||
for i < j {
|
||||
h := int(uint(i+j) >> 1)
|
||||
if !(cmp(data[m], data[h]) < 0) {
|
||||
i = h + 1
|
||||
} else {
|
||||
j = h
|
||||
}
|
||||
}
|
||||
// Swap values until data[m] reaches the position i.
|
||||
for k := m; k > i; k-- {
|
||||
data[k], data[k-1] = data[k-1], data[k]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
mid := int(uint(a+b) >> 1)
|
||||
n := mid + m
|
||||
var start, r int
|
||||
if m > mid {
|
||||
start = n - b
|
||||
r = mid
|
||||
} else {
|
||||
start = a
|
||||
r = m
|
||||
}
|
||||
p := n - 1
|
||||
|
||||
for start < r {
|
||||
c := int(uint(start+r) >> 1)
|
||||
if !(cmp(data[p-c], data[c]) < 0) {
|
||||
start = c + 1
|
||||
} else {
|
||||
r = c
|
||||
}
|
||||
}
|
||||
|
||||
end := n - start
|
||||
if start < m && m < end {
|
||||
rotateCmpFunc(data, start, m, end, cmp)
|
||||
}
|
||||
if a < start && start < mid {
|
||||
symMergeCmpFunc(data, a, start, mid, cmp)
|
||||
}
|
||||
if mid < end && end < b {
|
||||
symMergeCmpFunc(data, mid, end, b, cmp)
|
||||
}
|
||||
}
|
||||
|
||||
// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
|
||||
// Data of the form 'x u v y' is changed to 'x v u y'.
|
||||
// rotate performs at most b-a many calls to data.Swap,
|
||||
// and it assumes non-degenerate arguments: a < m && m < b.
|
||||
func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
|
||||
i := m - a
|
||||
j := b - m
|
||||
|
||||
for i != j {
|
||||
if i > j {
|
||||
swapRangeCmpFunc(data, m-i, m, j, cmp)
|
||||
i -= j
|
||||
} else {
|
||||
swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
|
||||
j -= i
|
||||
}
|
||||
}
|
||||
// i == j
|
||||
swapRangeCmpFunc(data, m-i, m, i, cmp)
|
||||
}
|
||||
481
vendor/golang.org/x/exp/slices/zsortordered.go
generated
vendored
Normal file
@@ -0,0 +1,481 @@
|
||||
// Code generated by gen_sort_variants.go; DO NOT EDIT.
|
||||
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slices
|
||||
|
||||
import "golang.org/x/exp/constraints"
|
||||
|
||||
// insertionSortOrdered sorts data[a:b] using insertion sort.
|
||||
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
|
||||
for i := a + 1; i < b; i++ {
|
||||
for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
|
||||
data[j], data[j-1] = data[j-1], data[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// siftDownOrdered implements the heap property on data[lo:hi].
|
||||
// first is an offset into the array where the root of the heap lies.
|
||||
func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
|
||||
root := lo
|
||||
for {
|
||||
child := 2*root + 1
|
||||
if child >= hi {
|
||||
break
|
||||
}
|
||||
if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
|
||||
child++
|
||||
}
|
||||
if !cmpLess(data[first+root], data[first+child]) {
|
||||
return
|
||||
}
|
||||
data[first+root], data[first+child] = data[first+child], data[first+root]
|
||||
root = child
|
||||
}
|
||||
}
|
||||
|
||||
func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
|
||||
first := a
|
||||
lo := 0
|
||||
hi := b - a
|
||||
|
||||
// Build heap with greatest element at top.
|
||||
for i := (hi - 1) / 2; i >= 0; i-- {
|
||||
siftDownOrdered(data, i, hi, first)
|
||||
}
|
||||
|
||||
// Pop elements, largest first, into end of data.
|
||||
for i := hi - 1; i >= 0; i-- {
|
||||
data[first], data[first+i] = data[first+i], data[first]
|
||||
siftDownOrdered(data, lo, i, first)
|
||||
}
|
||||
}
|
||||
|
||||
// pdqsortOrdered sorts data[a:b].
|
||||
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
|
||||
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
|
||||
// C++ implementation: https://github.com/orlp/pdqsort
|
||||
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
|
||||
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
|
||||
func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
|
||||
const maxInsertion = 12
|
||||
|
||||
var (
|
||||
wasBalanced = true // whether the last partitioning was reasonably balanced
|
||||
wasPartitioned = true // whether the slice was already partitioned
|
||||
)
|
||||
|
||||
for {
|
||||
length := b - a
|
||||
|
||||
if length <= maxInsertion {
|
||||
insertionSortOrdered(data, a, b)
|
||||
return
|
||||
}
|
||||
|
||||
// Fall back to heapsort if too many bad choices were made.
|
||||
if limit == 0 {
|
||||
heapSortOrdered(data, a, b)
|
||||
return
|
||||
}
|
||||
|
||||
// If the last partitioning was imbalanced, we need to break patterns.
|
||||
if !wasBalanced {
|
||||
breakPatternsOrdered(data, a, b)
|
||||
limit--
|
||||
}
|
||||
|
||||
pivot, hint := choosePivotOrdered(data, a, b)
|
||||
if hint == decreasingHint {
|
||||
reverseRangeOrdered(data, a, b)
|
||||
// The chosen pivot was pivot-a elements after the start of the array.
|
||||
// After reversing it is pivot-a elements before the end of the array.
|
||||
// The idea came from Rust's implementation.
|
||||
pivot = (b - 1) - (pivot - a)
|
||||
hint = increasingHint
|
||||
}
|
||||
|
||||
// The slice is likely already sorted.
|
||||
if wasBalanced && wasPartitioned && hint == increasingHint {
|
||||
if partialInsertionSortOrdered(data, a, b) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Probably the slice contains many duplicate elements, partition the slice into
|
||||
// elements equal to and elements greater than the pivot.
|
||||
if a > 0 && !cmpLess(data[a-1], data[pivot]) {
|
||||
mid := partitionEqualOrdered(data, a, b, pivot)
|
||||
a = mid
|
||||
continue
|
||||
}
|
||||
|
||||
mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
|
||||
wasPartitioned = alreadyPartitioned
|
||||
|
||||
leftLen, rightLen := mid-a, b-mid
|
||||
balanceThreshold := length / 8
|
||||
if leftLen < rightLen {
|
||||
wasBalanced = leftLen >= balanceThreshold
|
||||
pdqsortOrdered(data, a, mid, limit)
|
||||
a = mid + 1
|
||||
} else {
|
||||
wasBalanced = rightLen >= balanceThreshold
|
||||
pdqsortOrdered(data, mid+1, b, limit)
|
||||
b = mid
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// partitionOrdered does one quicksort partition.
|
||||
// Let p = data[pivot]
|
||||
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
|
||||
// On return, data[newpivot] = p
|
||||
func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
|
||||
data[a], data[pivot] = data[pivot], data[a]
|
||||
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
|
||||
|
||||
for i <= j && cmpLess(data[i], data[a]) {
|
||||
i++
|
||||
}
|
||||
for i <= j && !cmpLess(data[j], data[a]) {
|
||||
j--
|
||||
}
|
||||
if i > j {
|
||||
data[j], data[a] = data[a], data[j]
|
||||
return j, true
|
||||
}
|
||||
data[i], data[j] = data[j], data[i]
|
||||
i++
|
||||
j--
|
||||
|
||||
for {
|
||||
for i <= j && cmpLess(data[i], data[a]) {
|
||||
i++
|
||||
}
|
||||
for i <= j && !cmpLess(data[j], data[a]) {
|
||||
j--
|
||||
}
|
||||
if i > j {
|
||||
break
|
||||
}
|
||||
data[i], data[j] = data[j], data[i]
|
||||
i++
|
||||
j--
|
||||
}
|
||||
data[j], data[a] = data[a], data[j]
|
||||
return j, false
|
||||
}
|
||||
|
||||
// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
|
||||
// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
|
||||
func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
|
||||
data[a], data[pivot] = data[pivot], data[a]
|
||||
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
|
||||
|
||||
for {
|
||||
for i <= j && !cmpLess(data[a], data[i]) {
|
||||
i++
|
||||
}
|
||||
for i <= j && cmpLess(data[a], data[j]) {
|
||||
j--
|
||||
}
|
||||
if i > j {
|
||||
break
|
||||
}
|
||||
data[i], data[j] = data[j], data[i]
|
||||
i++
|
||||
j--
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
|
||||
func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
|
||||
const (
|
||||
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
|
||||
shortestShifting = 50 // don't shift any elements on short arrays
|
||||
)
|
||||
i := a + 1
|
||||
for j := 0; j < maxSteps; j++ {
|
||||
for i < b && !cmpLess(data[i], data[i-1]) {
|
||||
i++
|
||||
}
|
||||
|
||||
if i == b {
|
||||
return true
|
||||
}
|
||||
|
||||
if b-a < shortestShifting {
|
||||
return false
|
||||
}
|
||||
|
||||
data[i], data[i-1] = data[i-1], data[i]
|
||||
|
||||
// Shift the smaller one to the left.
|
||||
if i-a >= 2 {
|
||||
for j := i - 1; j >= 1; j-- {
|
||||
if !cmpLess(data[j], data[j-1]) {
|
||||
break
|
||||
}
|
||||
data[j], data[j-1] = data[j-1], data[j]
|
||||
}
|
||||
}
|
||||
// Shift the greater one to the right.
|
||||
if b-i >= 2 {
|
||||
for j := i + 1; j < b; j++ {
|
||||
if !cmpLess(data[j], data[j-1]) {
|
||||
break
|
||||
}
|
||||
data[j], data[j-1] = data[j-1], data[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
|
||||
// that might cause imbalanced partitions in quicksort.
|
||||
func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
|
||||
length := b - a
|
||||
if length >= 8 {
|
||||
random := xorshift(length)
|
||||
modulus := nextPowerOfTwo(length)
|
||||
|
||||
for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
|
||||
other := int(uint(random.Next()) & (modulus - 1))
|
||||
if other >= length {
|
||||
other -= length
|
||||
}
|
||||
data[idx], data[a+other] = data[a+other], data[idx]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// choosePivotOrdered chooses a pivot in data[a:b].
|
||||
//
|
||||
// [0,8): chooses a static pivot.
|
||||
// [8,shortestNinther): uses the simple median-of-three method.
|
||||
// [shortestNinther,∞): uses the Tukey ninther method.
|
||||
func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
|
||||
const (
|
||||
shortestNinther = 50
|
||||
maxSwaps = 4 * 3
|
||||
)
|
||||
|
||||
l := b - a
|
||||
|
||||
var (
|
||||
swaps int
|
||||
i = a + l/4*1
|
||||
j = a + l/4*2
|
||||
k = a + l/4*3
|
||||
)
|
||||
|
||||
if l >= 8 {
|
||||
if l >= shortestNinther {
|
||||
// Tukey ninther method, the idea came from Rust's implementation.
|
||||
i = medianAdjacentOrdered(data, i, &swaps)
|
||||
j = medianAdjacentOrdered(data, j, &swaps)
|
||||
k = medianAdjacentOrdered(data, k, &swaps)
|
||||
}
|
||||
// Find the median among i, j, k and store it into j.
|
||||
j = medianOrdered(data, i, j, k, &swaps)
|
||||
}
|
||||
|
||||
switch swaps {
|
||||
case 0:
|
||||
return j, increasingHint
|
||||
case maxSwaps:
|
||||
return j, decreasingHint
|
||||
default:
|
||||
return j, unknownHint
|
||||
}
|
||||
}
|
||||
|
||||
// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
|
||||
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
|
||||
if cmpLess(data[b], data[a]) {
|
||||
*swaps++
|
||||
return b, a
|
||||
}
|
||||
return a, b
|
||||
}
|
||||
|
||||
// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
|
||||
func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
|
||||
a, b = order2Ordered(data, a, b, swaps)
|
||||
b, c = order2Ordered(data, b, c, swaps)
|
||||
a, b = order2Ordered(data, a, b, swaps)
|
||||
return b
|
||||
}
|
||||
|
||||
// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
|
||||
func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
|
||||
return medianOrdered(data, a-1, a, a+1, swaps)
|
||||
}
|
||||
|
||||
func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
|
||||
i := a
|
||||
j := b - 1
|
||||
for i < j {
|
||||
data[i], data[j] = data[j], data[i]
|
||||
i++
|
||||
j--
|
||||
}
|
||||
}
|
||||
|
||||
func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
|
||||
for i := 0; i < n; i++ {
|
||||
data[a+i], data[b+i] = data[b+i], data[a+i]
|
||||
}
|
||||
}
|
||||
|
||||
func stableOrdered[E constraints.Ordered](data []E, n int) {
|
||||
blockSize := 20 // must be > 0
|
||||
a, b := 0, blockSize
|
||||
for b <= n {
|
||||
insertionSortOrdered(data, a, b)
|
||||
a = b
|
||||
b += blockSize
|
||||
}
|
||||
insertionSortOrdered(data, a, n)
|
||||
|
||||
for blockSize < n {
|
||||
a, b = 0, 2*blockSize
|
||||
for b <= n {
|
||||
symMergeOrdered(data, a, a+blockSize, b)
|
||||
a = b
|
||||
b += 2 * blockSize
|
||||
}
|
||||
if m := a + blockSize; m < n {
|
||||
symMergeOrdered(data, a, m, n)
|
||||
}
|
||||
blockSize *= 2
|
||||
}
|
||||
}
|
||||
|
||||
// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
|
||||
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
|
||||
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
|
||||
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
|
||||
// Computer Science, pages 714-723. Springer, 2004.
|
||||
//
|
||||
// Let M = m-a and N = b-m. Wolog M < N.
|
||||
// The recursion depth is bound by ceil(log(N+M)).
|
||||
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
|
||||
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
|
||||
//
|
||||
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
|
||||
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
|
||||
// in the paper carries through for Swap operations, especially as the block
|
||||
// swapping rotate uses only O(M+N) Swaps.
|
||||
//
|
||||
// symMerge assumes non-degenerate arguments: a < m && m < b.
|
||||
// Having the caller check this condition eliminates many leaf recursion calls,
|
||||
// which improves performance.
|
||||
func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
|
||||
// Avoid unnecessary recursions of symMerge
|
||||
// by direct insertion of data[a] into data[m:b]
|
||||
// if data[a:m] only contains one element.
|
||||
if m-a == 1 {
|
||||
// Use binary search to find the lowest index i
|
||||
// such that data[i] >= data[a] for m <= i < b.
|
||||
// Exit the search loop with i == b in case no such index exists.
|
||||
i := m
|
||||
j := b
|
||||
for i < j {
|
||||
h := int(uint(i+j) >> 1)
|
||||
if cmpLess(data[h], data[a]) {
|
||||
i = h + 1
|
||||
} else {
|
||||
j = h
|
||||
}
|
||||
}
|
||||
// Swap values until data[a] reaches the position before i.
|
||||
for k := a; k < i-1; k++ {
|
||||
data[k], data[k+1] = data[k+1], data[k]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Avoid unnecessary recursions of symMerge
|
||||
// by direct insertion of data[m] into data[a:m]
|
||||
// if data[m:b] only contains one element.
|
||||
if b-m == 1 {
|
||||
// Use binary search to find the lowest index i
|
||||
// such that data[i] > data[m] for a <= i < m.
|
||||
// Exit the search loop with i == m in case no such index exists.
|
||||
i := a
|
||||
j := m
|
||||
for i < j {
|
||||
h := int(uint(i+j) >> 1)
|
||||
if !cmpLess(data[m], data[h]) {
|
||||
i = h + 1
|
||||
} else {
|
||||
j = h
|
||||
}
|
||||
}
|
||||
// Swap values until data[m] reaches the position i.
|
||||
for k := m; k > i; k-- {
|
||||
data[k], data[k-1] = data[k-1], data[k]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
mid := int(uint(a+b) >> 1)
|
||||
n := mid + m
|
||||
var start, r int
|
||||
if m > mid {
|
||||
start = n - b
|
||||
r = mid
|
||||
} else {
|
||||
start = a
|
||||
r = m
|
||||
}
|
||||
p := n - 1
|
||||
|
||||
for start < r {
|
||||
c := int(uint(start+r) >> 1)
|
||||
if !cmpLess(data[p-c], data[c]) {
|
||||
start = c + 1
|
||||
} else {
|
||||
r = c
|
||||
}
|
||||
}
|
||||
|
||||
end := n - start
|
||||
if start < m && m < end {
|
||||
rotateOrdered(data, start, m, end)
|
||||
}
|
||||
if a < start && start < mid {
|
||||
symMergeOrdered(data, a, start, mid)
|
||||
}
|
||||
if mid < end && end < b {
|
||||
symMergeOrdered(data, mid, end, b)
|
||||
}
|
||||
}
|
||||
|
||||
// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
|
||||
// Data of the form 'x u v y' is changed to 'x v u y'.
|
||||
// rotate performs at most b-a many calls to data.Swap,
|
||||
// and it assumes non-degenerate arguments: a < m && m < b.
|
||||
func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
|
||||
i := m - a
|
||||
j := b - m
|
||||
|
||||
for i != j {
|
||||
if i > j {
|
||||
swapRangeOrdered(data, m-i, m, j)
|
||||
i -= j
|
||||
} else {
|
||||
swapRangeOrdered(data, m-i, m+j-i, i)
|
||||
j -= i
|
||||
}
|
||||
}
|
||||
// i == j
|
||||
swapRangeOrdered(data, m-i, m, i)
|
||||
}
|
||||
102
vendor/golang.org/x/exp/slog/attr.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An Attr is a key-value pair.
|
||||
type Attr struct {
|
||||
Key string
|
||||
Value Value
|
||||
}
|
||||
|
||||
// String returns an Attr for a string value.
|
||||
func String(key, value string) Attr {
|
||||
return Attr{key, StringValue(value)}
|
||||
}
|
||||
|
||||
// Int64 returns an Attr for an int64.
|
||||
func Int64(key string, value int64) Attr {
|
||||
return Attr{key, Int64Value(value)}
|
||||
}
|
||||
|
||||
// Int converts an int to an int64 and returns
|
||||
// an Attr with that value.
|
||||
func Int(key string, value int) Attr {
|
||||
return Int64(key, int64(value))
|
||||
}
|
||||
|
||||
// Uint64 returns an Attr for a uint64.
|
||||
func Uint64(key string, v uint64) Attr {
|
||||
return Attr{key, Uint64Value(v)}
|
||||
}
|
||||
|
||||
// Float64 returns an Attr for a floating-point number.
|
||||
func Float64(key string, v float64) Attr {
|
||||
return Attr{key, Float64Value(v)}
|
||||
}
|
||||
|
||||
// Bool returns an Attr for a bool.
|
||||
func Bool(key string, v bool) Attr {
|
||||
return Attr{key, BoolValue(v)}
|
||||
}
|
||||
|
||||
// Time returns an Attr for a time.Time.
|
||||
// It discards the monotonic portion.
|
||||
func Time(key string, v time.Time) Attr {
|
||||
return Attr{key, TimeValue(v)}
|
||||
}
|
||||
|
||||
// Duration returns an Attr for a time.Duration.
|
||||
func Duration(key string, v time.Duration) Attr {
|
||||
return Attr{key, DurationValue(v)}
|
||||
}
|
||||
|
||||
// Group returns an Attr for a Group Value.
|
||||
// The first argument is the key; the remaining arguments
|
||||
// are converted to Attrs as in [Logger.Log].
|
||||
//
|
||||
// Use Group to collect several key-value pairs under a single
|
||||
// key on a log line, or as the result of LogValue
|
||||
// in order to log a single value as multiple Attrs.
|
||||
func Group(key string, args ...any) Attr {
|
||||
return Attr{key, GroupValue(argsToAttrSlice(args)...)}
|
||||
}
|
||||
|
||||
func argsToAttrSlice(args []any) []Attr {
|
||||
var (
|
||||
attr Attr
|
||||
attrs []Attr
|
||||
)
|
||||
for len(args) > 0 {
|
||||
attr, args = argsToAttr(args)
|
||||
attrs = append(attrs, attr)
|
||||
}
|
||||
return attrs
|
||||
}
|
||||
|
||||
// Any returns an Attr for the supplied value.
|
||||
// See [Value.AnyValue] for how values are treated.
|
||||
func Any(key string, value any) Attr {
|
||||
return Attr{key, AnyValue(value)}
|
||||
}
|
||||
|
||||
// Equal reports whether a and b have equal keys and values.
|
||||
func (a Attr) Equal(b Attr) bool {
|
||||
return a.Key == b.Key && a.Value.Equal(b.Value)
|
||||
}
|
||||
|
||||
func (a Attr) String() string {
|
||||
return fmt.Sprintf("%s=%s", a.Key, a.Value)
|
||||
}
|
||||
|
||||
// isEmpty reports whether a has an empty key and a nil value.
|
||||
// That can be written as Attr{} or Any("", nil).
|
||||
func (a Attr) isEmpty() bool {
|
||||
return a.Key == "" && a.Value.num == 0 && a.Value.any == nil
|
||||
}
|
||||
316
vendor/golang.org/x/exp/slog/doc.go
generated
vendored
Normal file
@@ -0,0 +1,316 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package slog provides structured logging,
|
||||
in which log records include a message,
|
||||
a severity level, and various other attributes
|
||||
expressed as key-value pairs.
|
||||
|
||||
It defines a type, [Logger],
|
||||
which provides several methods (such as [Logger.Info] and [Logger.Error])
|
||||
for reporting events of interest.
|
||||
|
||||
Each Logger is associated with a [Handler].
|
||||
A Logger output method creates a [Record] from the method arguments
|
||||
and passes it to the Handler, which decides how to handle it.
|
||||
There is a default Logger accessible through top-level functions
|
||||
(such as [Info] and [Error]) that call the corresponding Logger methods.
|
||||
|
||||
A log record consists of a time, a level, a message, and a set of key-value
|
||||
pairs, where the keys are strings and the values may be of any type.
|
||||
As an example,
|
||||
|
||||
slog.Info("hello", "count", 3)
|
||||
|
||||
creates a record containing the time of the call,
|
||||
a level of Info, the message "hello", and a single
|
||||
pair with key "count" and value 3.
|
||||
|
||||
The [Info] top-level function calls the [Logger.Info] method on the default Logger.
|
||||
In addition to [Logger.Info], there are methods for Debug, Warn and Error levels.
|
||||
Besides these convenience methods for common levels,
|
||||
there is also a [Logger.Log] method which takes the level as an argument.
|
||||
Each of these methods has a corresponding top-level function that uses the
|
||||
default logger.
|
||||
|
||||
The default handler formats the log record's message, time, level, and attributes
|
||||
as a string and passes it to the [log] package.
|
||||
|
||||
2022/11/08 15:28:26 INFO hello count=3
|
||||
|
||||
For more control over the output format, create a logger with a different handler.
|
||||
This statement uses [New] to create a new logger with a TextHandler
|
||||
that writes structured records in text form to standard error:
|
||||
|
||||
logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
|
||||
|
||||
[TextHandler] output is a sequence of key=value pairs, easily and unambiguously
|
||||
parsed by machine. This statement:
|
||||
|
||||
logger.Info("hello", "count", 3)
|
||||
|
||||
produces this output:
|
||||
|
||||
time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3
|
||||
|
||||
The package also provides [JSONHandler], whose output is line-delimited JSON:
|
||||
|
||||
logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
|
||||
logger.Info("hello", "count", 3)
|
||||
|
||||
produces this output:
|
||||
|
||||
{"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3}
|
||||
|
||||
Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions].
|
||||
There are options for setting the minimum level (see Levels, below),
|
||||
displaying the source file and line of the log call, and
|
||||
modifying attributes before they are logged.
|
||||
|
||||
Setting a logger as the default with
|
||||
|
||||
slog.SetDefault(logger)
|
||||
|
||||
will cause the top-level functions like [Info] to use it.
|
||||
[SetDefault] also updates the default logger used by the [log] package,
|
||||
so that existing applications that use [log.Printf] and related functions
|
||||
will send log records to the logger's handler without needing to be rewritten.
|
||||
|
||||
Some attributes are common to many log calls.
|
||||
For example, you may wish to include the URL or trace identifier of a server request
|
||||
with all log events arising from the request.
|
||||
Rather than repeat the attribute with every log call, you can use [Logger.With]
|
||||
to construct a new Logger containing the attributes:
|
||||
|
||||
logger2 := logger.With("url", r.URL)
|
||||
|
||||
The arguments to With are the same key-value pairs used in [Logger.Info].
|
||||
The result is a new Logger with the same handler as the original, but additional
|
||||
attributes that will appear in the output of every call.
|
||||
|
||||
# Levels
|
||||
|
||||
A [Level] is an integer representing the importance or severity of a log event.
|
||||
The higher the level, the more severe the event.
|
||||
This package defines constants for the most common levels,
|
||||
but any int can be used as a level.
|
||||
|
||||
In an application, you may wish to log messages only at a certain level or greater.
|
||||
One common configuration is to log messages at Info or higher levels,
|
||||
suppressing debug logging until it is needed.
|
||||
The built-in handlers can be configured with the minimum level to output by
|
||||
setting [HandlerOptions.Level].
|
||||
The program's `main` function typically does this.
|
||||
The default value is LevelInfo.
|
||||
|
||||
Setting the [HandlerOptions.Level] field to a [Level] value
|
||||
fixes the handler's minimum level throughout its lifetime.
|
||||
Setting it to a [LevelVar] allows the level to be varied dynamically.
|
||||
A LevelVar holds a Level and is safe to read or write from multiple
|
||||
goroutines.
|
||||
To vary the level dynamically for an entire program, first initialize
|
||||
a global LevelVar:
|
||||
|
||||
var programLevel = new(slog.LevelVar) // Info by default
|
||||
|
||||
Then use the LevelVar to construct a handler, and make it the default:
|
||||
|
||||
h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel})
|
||||
slog.SetDefault(slog.New(h))
|
||||
|
||||
Now the program can change its logging level with a single statement:
|
||||
|
||||
programLevel.Set(slog.LevelDebug)
|
||||
|
||||
# Groups
|
||||
|
||||
Attributes can be collected into groups.
|
||||
A group has a name that is used to qualify the names of its attributes.
|
||||
How this qualification is displayed depends on the handler.
|
||||
[TextHandler] separates the group and attribute names with a dot.
|
||||
[JSONHandler] treats each group as a separate JSON object, with the group name as the key.
|
||||
|
||||
Use [Group] to create a Group attribute from a name and a list of key-value pairs:
|
||||
|
||||
slog.Group("request",
|
||||
"method", r.Method,
|
||||
"url", r.URL)
|
||||
|
||||
TextHandler would display this group as
|
||||
|
||||
request.method=GET request.url=http://example.com
|
||||
|
||||
JSONHandler would display it as
|
||||
|
||||
"request":{"method":"GET","url":"http://example.com"}
|
||||
|
||||
Use [Logger.WithGroup] to qualify all of a Logger's output
|
||||
with a group name. Calling WithGroup on a Logger results in a
|
||||
new Logger with the same Handler as the original, but with all
|
||||
its attributes qualified by the group name.
|
||||
|
||||
This can help prevent duplicate attribute keys in large systems,
|
||||
where subsystems might use the same keys.
|
||||
Pass each subsystem a different Logger with its own group name so that
|
||||
potential duplicates are qualified:
|
||||
|
||||
logger := slog.Default().With("id", systemID)
|
||||
parserLogger := logger.WithGroup("parser")
|
||||
parseInput(input, parserLogger)
|
||||
|
||||
When parseInput logs with parserLogger, its keys will be qualified with "parser",
|
||||
so even if it uses the common key "id", the log line will have distinct keys.
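For instance, parseInput might log like this (inputID is a hypothetical helper):

	func parseInput(input string, logger *slog.Logger) {
		logger.Info("parse started", "id", inputID(input))
		// With TextHandler this key appears as parser.id, distinct from
		// the top-level id attribute added with With.
	}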
|
||||
|
||||
# Contexts
|
||||
|
||||
Some handlers may wish to include information from the [context.Context] that is
|
||||
available at the call site. One example of such information
|
||||
is the identifier for the current span when tracing is enabled.
|
||||
|
||||
The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first
|
||||
argument, as do their corresponding top-level functions.
|
||||
|
||||
Although the convenience methods on Logger (Info and so on) and the
|
||||
corresponding top-level functions do not take a context, the alternatives ending
|
||||
in "Context" do. For example,
|
||||
|
||||
slog.InfoContext(ctx, "message")
|
||||
|
||||
It is recommended to pass a context to an output method if one is available.
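For instance, a wrapping handler might copy a trace identifier from the context
into each record. In this sketch, traceIDKey and the embedded Handler are
assumptions, not part of this package:

	type traceHandler struct{ slog.Handler }

	func (h traceHandler) Handle(ctx context.Context, r slog.Record) error {
		if id, ok := ctx.Value(traceIDKey).(string); ok {
			r = r.Clone() // do not modify the caller's Record
			r.AddAttrs(slog.String("trace_id", id))
		}
		return h.Handler.Handle(ctx, r)
	}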
|
||||
|
||||
# Attrs and Values
|
||||
|
||||
An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as
|
||||
alternating keys and values. The statement
|
||||
|
||||
slog.Info("hello", slog.Int("count", 3))
|
||||
|
||||
behaves the same as
|
||||
|
||||
slog.Info("hello", "count", 3)
|
||||
|
||||
There are convenience constructors for [Attr] such as [Int], [String], and [Bool]
|
||||
for common types, as well as the function [Any] for constructing Attrs of any
|
||||
type.
|
||||
|
||||
The value part of an Attr is a type called [Value].
|
||||
Like an [any], a Value can hold any Go value,
|
||||
but it can represent typical values, including all numbers and strings,
|
||||
without an allocation.
|
||||
|
||||
For the most efficient log output, use [Logger.LogAttrs].
|
||||
It is similar to [Logger.Log] but accepts only Attrs, not alternating
|
||||
keys and values; this allows it, too, to avoid allocation.
|
||||
|
||||
The call
|
||||
|
||||
logger.LogAttrs(nil, slog.LevelInfo, "hello", slog.Int("count", 3))
|
||||
|
||||
is the most efficient way to achieve the same output as
|
||||
|
||||
slog.Info("hello", "count", 3)
|
||||
|
||||
# Customizing a type's logging behavior
|
||||
|
||||
If a type implements the [LogValuer] interface, the [Value] returned from its LogValue
|
||||
method is used for logging. You can use this to control how values of the type
|
||||
appear in logs. For example, you can redact secret information like passwords,
|
||||
or gather a struct's fields in a Group. See the examples under [LogValuer] for
|
||||
details.
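A minimal sketch of the redaction case, using a hypothetical Token type:

	type Token string

	// LogValue implements LogValuer and hides the secret.
	func (Token) LogValue() slog.Value {
		return slog.StringValue("REDACTED_TOKEN")
	}

A call such as slog.Info("login", "token", t) then shows the placeholder
rather than the token itself.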
|
||||
|
||||
A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve]
|
||||
method handles these cases carefully, avoiding infinite loops and unbounded recursion.
|
||||
Handler authors and others may wish to use Value.Resolve instead of calling LogValue directly.
|
||||
|
||||
# Wrapping output methods
|
||||
|
||||
The logger functions use reflection over the call stack to find the file name
|
||||
and line number of the logging call within the application. This can produce
|
||||
incorrect source information for functions that wrap slog. For instance, if you
|
||||
define this function in file mylog.go:
|
||||
|
||||
func Infof(format string, args ...any) {
|
||||
slog.Default().Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
and you call it like this in main.go:
|
||||
|
||||
Infof("hello, %s", "world")
|
||||
|
||||
then slog will report the source file as mylog.go, not main.go.
|
||||
|
||||
A correct implementation of Infof will obtain the source location
|
||||
(pc) and pass it to NewRecord.
|
||||
The Infof function in the package-level example called "wrapping"
|
||||
demonstrates how to do this.
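A sketch along those lines, matching the Infof signature above (the skip count
of 2 assumes Infof is called directly by the code whose location should be
reported):

	func Infof(format string, args ...any) {
		logger := slog.Default()
		if !logger.Enabled(context.Background(), slog.LevelInfo) {
			return
		}
		var pcs [1]uintptr
		runtime.Callers(2, pcs[:]) // skip [runtime.Callers, Infof]
		r := slog.NewRecord(time.Now(), slog.LevelInfo, fmt.Sprintf(format, args...), pcs[0])
		_ = logger.Handler().Handle(context.Background(), r)
	}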
|
||||
|
||||
# Working with Records
|
||||
|
||||
Sometimes a Handler will need to modify a Record
|
||||
before passing it on to another Handler or backend.
|
||||
A Record contains a mixture of simple public fields (e.g. Time, Level, Message)
|
||||
and hidden fields that refer to state (such as attributes) indirectly. This
|
||||
means that modifying a simple copy of a Record (e.g. by calling
|
||||
[Record.Add] or [Record.AddAttrs] to add attributes)
|
||||
may have unexpected effects on the original.
|
||||
Before modifying a Record, use [Clone] to
|
||||
create a copy that shares no state with the original,
|
||||
or create a new Record with [NewRecord]
|
||||
and build up its Attrs by traversing the old ones with [Record.Attrs].
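For instance, a handler that drops a sensitive attribute might rebuild the
record rather than mutate the one it was given (a sketch; the key name is
illustrative):

	func withoutPassword(r slog.Record) slog.Record {
		r2 := slog.NewRecord(r.Time, r.Level, r.Message, r.PC)
		r.Attrs(func(a slog.Attr) bool {
			if a.Key != "password" {
				r2.AddAttrs(a)
			}
			return true
		})
		return r2
	}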
|
||||
|
||||
# Performance considerations
|
||||
|
||||
If profiling your application demonstrates that logging is taking significant time,
|
||||
the following suggestions may help.
|
||||
|
||||
If many log lines have a common attribute, use [Logger.With] to create a Logger with
|
||||
that attribute. The built-in handlers will format that attribute only once, at the
|
||||
call to [Logger.With]. The [Handler] interface is designed to allow that optimization,
|
||||
and a well-written Handler should take advantage of it.
|
||||
|
||||
The arguments to a log call are always evaluated, even if the log event is discarded.
|
||||
If possible, defer computation so that it happens only if the value is actually logged.
|
||||
For example, consider the call
|
||||
|
||||
slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily
|
||||
|
||||
The URL.String method will be called even if the logger discards Info-level events.
|
||||
Instead, pass the URL directly:
|
||||
|
||||
slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed
|
||||
|
||||
The built-in [TextHandler] will call its String method, but only
|
||||
if the log event is enabled.
|
||||
Avoiding the call to String also preserves the structure of the underlying value.
|
||||
For example [JSONHandler] emits the components of the parsed URL as a JSON object.
|
||||
If you want to avoid eagerly paying the cost of the String call
|
||||
without causing the handler to potentially inspect the structure of the value,
|
||||
wrap the value in a fmt.Stringer implementation that hides its Marshal methods.
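One possible shape for such a wrapper (a sketch, not an API of this package):

	// stringerOnly hides any Marshal methods of the wrapped value,
	// exposing only fmt.Stringer.
	type stringerOnly struct{ v fmt.Stringer }

	func (s stringerOnly) String() string { return s.v.String() }

	slog.Info("starting request", "url", stringerOnly{r.URL})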
|
||||
|
||||
You can also use the [LogValuer] interface to avoid unnecessary work in disabled log
|
||||
calls. Say you need to log some expensive value:
|
||||
|
||||
slog.Debug("frobbing", "value", computeExpensiveValue(arg))
|
||||
|
||||
Even if this line is disabled, computeExpensiveValue will be called.
|
||||
To avoid that, define a type implementing LogValuer:
|
||||
|
||||
type expensive struct { arg int }
|
||||
|
||||
func (e expensive) LogValue() slog.Value {
|
||||
return slog.AnyValue(computeExpensiveValue(e.arg))
|
||||
}
|
||||
|
||||
Then use a value of that type in log calls:
|
||||
|
||||
slog.Debug("frobbing", "value", expensive{arg})
|
||||
|
||||
Now computeExpensiveValue will only be called when the line is enabled.
|
||||
|
||||
The built-in handlers acquire a lock before calling [io.Writer.Write]
|
||||
to ensure that each record is written in one piece. User-defined
|
||||
handlers are responsible for their own locking.
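A user-defined handler that writes to a shared destination might therefore
guard its final Write along these lines (sketch only; the formatting step is
a hypothetical helper):

	type myHandler struct {
		mu  sync.Mutex
		out io.Writer
	}

	func (h *myHandler) Handle(ctx context.Context, r slog.Record) error {
		buf := h.format(r) // hypothetical: render r to a []byte
		h.mu.Lock()
		defer h.mu.Unlock()
		_, err := h.out.Write(buf)
		return err
	}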
|
||||
*/
|
||||
package slog
|
||||
577
vendor/golang.org/x/exp/slog/handler.go
generated
vendored
Normal file
@@ -0,0 +1,577 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/exp/slog/internal/buffer"
|
||||
)
|
||||
|
||||
// A Handler handles log records produced by a Logger.
|
||||
//
|
||||
// A typical handler may print log records to standard error,
|
||||
// or write them to a file or database, or perhaps augment them
|
||||
// with additional attributes and pass them on to another handler.
|
||||
//
|
||||
// Any of the Handler's methods may be called concurrently with itself
|
||||
// or with other methods. It is the responsibility of the Handler to
|
||||
// manage this concurrency.
|
||||
//
|
||||
// Users of the slog package should not invoke Handler methods directly.
|
||||
// They should use the methods of [Logger] instead.
|
||||
type Handler interface {
|
||||
// Enabled reports whether the handler handles records at the given level.
|
||||
// The handler ignores records whose level is lower.
|
||||
// It is called early, before any arguments are processed,
|
||||
// to save effort if the log event should be discarded.
|
||||
// If called from a Logger method, the first argument is the context
|
||||
// passed to that method, or context.Background() if nil was passed
|
||||
// or the method does not take a context.
|
||||
// The context is passed so Enabled can use its values
|
||||
// to make a decision.
|
||||
Enabled(context.Context, Level) bool
|
||||
|
||||
// Handle handles the Record.
|
||||
// It will only be called when Enabled returns true.
|
||||
// The Context argument is as for Enabled.
|
||||
// It is present solely to provide Handlers access to the context's values.
|
||||
// Canceling the context should not affect record processing.
|
||||
// (Among other things, log messages may be necessary to debug a
|
||||
// cancellation-related problem.)
|
||||
//
|
||||
// Handle methods that produce output should observe the following rules:
|
||||
// - If r.Time is the zero time, ignore the time.
|
||||
// - If r.PC is zero, ignore it.
|
||||
// - Attr's values should be resolved.
|
||||
// - If an Attr's key and value are both the zero value, ignore the Attr.
|
||||
// This can be tested with attr.Equal(Attr{}).
|
||||
// - If a group's key is empty, inline the group's Attrs.
|
||||
// - If a group has no Attrs (even if it has a non-empty key),
|
||||
// ignore it.
|
||||
Handle(context.Context, Record) error
|
||||
|
||||
// WithAttrs returns a new Handler whose attributes consist of
|
||||
// both the receiver's attributes and the arguments.
|
||||
// The Handler owns the slice: it may retain, modify or discard it.
|
||||
WithAttrs(attrs []Attr) Handler
|
||||
|
||||
// WithGroup returns a new Handler with the given group appended to
|
||||
// the receiver's existing groups.
|
||||
// The keys of all subsequent attributes, whether added by With or in a
|
||||
// Record, should be qualified by the sequence of group names.
|
||||
//
|
||||
// How this qualification happens is up to the Handler, so long as
|
||||
// this Handler's attribute keys differ from those of another Handler
|
||||
// with a different sequence of group names.
|
||||
//
|
||||
// A Handler should treat WithGroup as starting a Group of Attrs that ends
|
||||
// at the end of the log event. That is,
|
||||
//
|
||||
// logger.WithGroup("s").LogAttrs(ctx, level, msg, slog.Int("a", 1), slog.Int("b", 2))
|
||||
//
|
||||
// should behave like
|
||||
//
|
||||
// logger.LogAttrs(ctx, level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2)))
|
||||
//
|
||||
// If the name is empty, WithGroup returns the receiver.
|
||||
WithGroup(name string) Handler
|
||||
}
|
||||
|
||||
type defaultHandler struct {
|
||||
ch *commonHandler
|
||||
// log.Output, except for testing
|
||||
output func(calldepth int, message string) error
|
||||
}
|
||||
|
||||
func newDefaultHandler(output func(int, string) error) *defaultHandler {
|
||||
return &defaultHandler{
|
||||
ch: &commonHandler{json: false},
|
||||
output: output,
|
||||
}
|
||||
}
|
||||
|
||||
func (*defaultHandler) Enabled(_ context.Context, l Level) bool {
|
||||
return l >= LevelInfo
|
||||
}
|
||||
|
||||
// Collect the level, attributes and message in a string and
|
||||
// write it with the default log.Logger.
|
||||
// Let the log.Logger handle time and file/line.
|
||||
func (h *defaultHandler) Handle(ctx context.Context, r Record) error {
|
||||
buf := buffer.New()
|
||||
buf.WriteString(r.Level.String())
|
||||
buf.WriteByte(' ')
|
||||
buf.WriteString(r.Message)
|
||||
state := h.ch.newHandleState(buf, true, " ", nil)
|
||||
defer state.free()
|
||||
state.appendNonBuiltIns(r)
|
||||
|
||||
// skip [h.output, defaultHandler.Handle, handlerWriter.Write, log.Output]
|
||||
return h.output(4, buf.String())
|
||||
}
|
||||
|
||||
func (h *defaultHandler) WithAttrs(as []Attr) Handler {
|
||||
return &defaultHandler{h.ch.withAttrs(as), h.output}
|
||||
}
|
||||
|
||||
func (h *defaultHandler) WithGroup(name string) Handler {
|
||||
return &defaultHandler{h.ch.withGroup(name), h.output}
|
||||
}
|
||||
|
||||
// HandlerOptions are options for a TextHandler or JSONHandler.
|
||||
// A zero HandlerOptions consists entirely of default values.
|
||||
type HandlerOptions struct {
|
||||
// AddSource causes the handler to compute the source code position
|
||||
// of the log statement and add a SourceKey attribute to the output.
|
||||
AddSource bool
|
||||
|
||||
// Level reports the minimum record level that will be logged.
|
||||
// The handler discards records with lower levels.
|
||||
// If Level is nil, the handler assumes LevelInfo.
|
||||
// The handler calls Level.Level for each record processed;
|
||||
// to adjust the minimum level dynamically, use a LevelVar.
|
||||
Level Leveler
|
||||
|
||||
// ReplaceAttr is called to rewrite each non-group attribute before it is logged.
|
||||
// The attribute's value has been resolved (see [Value.Resolve]).
|
||||
// If ReplaceAttr returns an Attr with Key == "", the attribute is discarded.
|
||||
//
|
||||
// The built-in attributes with keys "time", "level", "source", and "msg"
|
||||
// are passed to this function, except that time is omitted
|
||||
// if zero, and source is omitted if AddSource is false.
|
||||
//
|
||||
// The first argument is a list of currently open groups that contain the
|
||||
// Attr. It must not be retained or modified. ReplaceAttr is never called
|
||||
// for Group attributes, only their contents. For example, the attribute
|
||||
// list
|
||||
//
|
||||
// Int("a", 1), Group("g", Int("b", 2)), Int("c", 3)
|
||||
//
|
||||
// results in consecutive calls to ReplaceAttr with the following arguments:
|
||||
//
|
||||
// nil, Int("a", 1)
|
||||
// []string{"g"}, Int("b", 2)
|
||||
// nil, Int("c", 3)
|
||||
//
|
||||
// ReplaceAttr can be used to change the default keys of the built-in
|
||||
// attributes, convert types (for example, to replace a `time.Time` with the
|
||||
// integer seconds since the Unix epoch), sanitize personal information, or
|
||||
// remove attributes from the output.
|
||||
ReplaceAttr func(groups []string, a Attr) Attr
|
||||
}
|
||||
|
||||
// Keys for "built-in" attributes.
|
||||
const (
|
||||
// TimeKey is the key used by the built-in handlers for the time
|
||||
// when the log method is called. The associated Value is a [time.Time].
|
||||
TimeKey = "time"
|
||||
// LevelKey is the key used by the built-in handlers for the level
|
||||
// of the log call. The associated value is a [Level].
|
||||
LevelKey = "level"
|
||||
// MessageKey is the key used by the built-in handlers for the
|
||||
// message of the log call. The associated value is a string.
|
||||
MessageKey = "msg"
|
||||
// SourceKey is the key used by the built-in handlers for the source file
|
||||
// and line of the log call. The associated value is a string.
|
||||
SourceKey = "source"
|
||||
)
|
||||
|
||||
type commonHandler struct {
|
||||
json bool // true => output JSON; false => output text
|
||||
opts HandlerOptions
|
||||
preformattedAttrs []byte
|
||||
groupPrefix string // for text: prefix of groups opened in preformatting
|
||||
groups []string // all groups started from WithGroup
|
||||
nOpenGroups int // the number of groups opened in preformattedAttrs
|
||||
mu sync.Mutex
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (h *commonHandler) clone() *commonHandler {
|
||||
// We can't use assignment because we can't copy the mutex.
|
||||
return &commonHandler{
|
||||
json: h.json,
|
||||
opts: h.opts,
|
||||
preformattedAttrs: slices.Clip(h.preformattedAttrs),
|
||||
groupPrefix: h.groupPrefix,
|
||||
groups: slices.Clip(h.groups),
|
||||
nOpenGroups: h.nOpenGroups,
|
||||
w: h.w,
|
||||
}
|
||||
}
|
||||
|
||||
// enabled reports whether l is greater than or equal to the
|
||||
// minimum level.
|
||||
func (h *commonHandler) enabled(l Level) bool {
|
||||
minLevel := LevelInfo
|
||||
if h.opts.Level != nil {
|
||||
minLevel = h.opts.Level.Level()
|
||||
}
|
||||
return l >= minLevel
|
||||
}
|
||||
|
||||
func (h *commonHandler) withAttrs(as []Attr) *commonHandler {
|
||||
h2 := h.clone()
|
||||
// Pre-format the attributes as an optimization.
|
||||
prefix := buffer.New()
|
||||
defer prefix.Free()
|
||||
prefix.WriteString(h.groupPrefix)
|
||||
state := h2.newHandleState((*buffer.Buffer)(&h2.preformattedAttrs), false, "", prefix)
|
||||
defer state.free()
|
||||
if len(h2.preformattedAttrs) > 0 {
|
||||
state.sep = h.attrSep()
|
||||
}
|
||||
state.openGroups()
|
||||
for _, a := range as {
|
||||
state.appendAttr(a)
|
||||
}
|
||||
// Remember the new prefix for later keys.
|
||||
h2.groupPrefix = state.prefix.String()
|
||||
// Remember how many opened groups are in preformattedAttrs,
|
||||
// so we don't open them again when we handle a Record.
|
||||
h2.nOpenGroups = len(h2.groups)
|
||||
return h2
|
||||
}
|
||||
|
||||
func (h *commonHandler) withGroup(name string) *commonHandler {
|
||||
if name == "" {
|
||||
return h
|
||||
}
|
||||
h2 := h.clone()
|
||||
h2.groups = append(h2.groups, name)
|
||||
return h2
|
||||
}
|
||||
|
||||
func (h *commonHandler) handle(r Record) error {
|
||||
state := h.newHandleState(buffer.New(), true, "", nil)
|
||||
defer state.free()
|
||||
if h.json {
|
||||
state.buf.WriteByte('{')
|
||||
}
|
||||
// Built-in attributes. They are not in a group.
|
||||
stateGroups := state.groups
|
||||
state.groups = nil // So ReplaceAttrs sees no groups instead of the pre groups.
|
||||
rep := h.opts.ReplaceAttr
|
||||
// time
|
||||
if !r.Time.IsZero() {
|
||||
key := TimeKey
|
||||
val := r.Time.Round(0) // strip monotonic to match Attr behavior
|
||||
if rep == nil {
|
||||
state.appendKey(key)
|
||||
state.appendTime(val)
|
||||
} else {
|
||||
state.appendAttr(Time(key, val))
|
||||
}
|
||||
}
|
||||
// level
|
||||
key := LevelKey
|
||||
val := r.Level
|
||||
if rep == nil {
|
||||
state.appendKey(key)
|
||||
state.appendString(val.String())
|
||||
} else {
|
||||
state.appendAttr(Any(key, val))
|
||||
}
|
||||
// source
|
||||
if h.opts.AddSource {
|
||||
state.appendAttr(Any(SourceKey, r.source()))
|
||||
}
|
||||
key = MessageKey
|
||||
msg := r.Message
|
||||
if rep == nil {
|
||||
state.appendKey(key)
|
||||
state.appendString(msg)
|
||||
} else {
|
||||
state.appendAttr(String(key, msg))
|
||||
}
|
||||
state.groups = stateGroups // Restore groups passed to ReplaceAttrs.
|
||||
state.appendNonBuiltIns(r)
|
||||
state.buf.WriteByte('\n')
|
||||
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
_, err := h.w.Write(*state.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *handleState) appendNonBuiltIns(r Record) {
|
||||
// preformatted Attrs
|
||||
if len(s.h.preformattedAttrs) > 0 {
|
||||
s.buf.WriteString(s.sep)
|
||||
s.buf.Write(s.h.preformattedAttrs)
|
||||
s.sep = s.h.attrSep()
|
||||
}
|
||||
// Attrs in Record -- unlike the built-in ones, they are in groups started
|
||||
// from WithGroup.
|
||||
s.prefix = buffer.New()
|
||||
defer s.prefix.Free()
|
||||
s.prefix.WriteString(s.h.groupPrefix)
|
||||
s.openGroups()
|
||||
r.Attrs(func(a Attr) bool {
|
||||
s.appendAttr(a)
|
||||
return true
|
||||
})
|
||||
if s.h.json {
|
||||
// Close all open groups.
|
||||
for range s.h.groups {
|
||||
s.buf.WriteByte('}')
|
||||
}
|
||||
// Close the top-level object.
|
||||
s.buf.WriteByte('}')
|
||||
}
|
||||
}
|
||||
|
||||
// attrSep returns the separator between attributes.
|
||||
func (h *commonHandler) attrSep() string {
|
||||
if h.json {
|
||||
return ","
|
||||
}
|
||||
return " "
|
||||
}
|
||||
|
||||
// handleState holds state for a single call to commonHandler.handle.
|
||||
// The initial value of sep determines whether to emit a separator
|
||||
// before the next key; after a key is written, it holds the handler's separator.
|
||||
type handleState struct {
|
||||
h *commonHandler
|
||||
buf *buffer.Buffer
|
||||
freeBuf bool // should buf be freed?
|
||||
sep string // separator to write before next key
|
||||
prefix *buffer.Buffer // for text: key prefix
|
||||
groups *[]string // pool-allocated slice of active groups, for ReplaceAttr
|
||||
}
|
||||
|
||||
var groupPool = sync.Pool{New: func() any {
|
||||
s := make([]string, 0, 10)
|
||||
return &s
|
||||
}}
|
||||
|
||||
func (h *commonHandler) newHandleState(buf *buffer.Buffer, freeBuf bool, sep string, prefix *buffer.Buffer) handleState {
|
||||
s := handleState{
|
||||
h: h,
|
||||
buf: buf,
|
||||
freeBuf: freeBuf,
|
||||
sep: sep,
|
||||
prefix: prefix,
|
||||
}
|
||||
if h.opts.ReplaceAttr != nil {
|
||||
s.groups = groupPool.Get().(*[]string)
|
||||
*s.groups = append(*s.groups, h.groups[:h.nOpenGroups]...)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *handleState) free() {
|
||||
if s.freeBuf {
|
||||
s.buf.Free()
|
||||
}
|
||||
if gs := s.groups; gs != nil {
|
||||
*gs = (*gs)[:0]
|
||||
groupPool.Put(gs)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *handleState) openGroups() {
|
||||
for _, n := range s.h.groups[s.h.nOpenGroups:] {
|
||||
s.openGroup(n)
|
||||
}
|
||||
}
|
||||
|
||||
// Separator for group names and keys.
|
||||
const keyComponentSep = '.'
|
||||
|
||||
// openGroup starts a new group of attributes
|
||||
// with the given name.
|
||||
func (s *handleState) openGroup(name string) {
|
||||
if s.h.json {
|
||||
s.appendKey(name)
|
||||
s.buf.WriteByte('{')
|
||||
s.sep = ""
|
||||
} else {
|
||||
s.prefix.WriteString(name)
|
||||
s.prefix.WriteByte(keyComponentSep)
|
||||
}
|
||||
// Collect group names for ReplaceAttr.
|
||||
if s.groups != nil {
|
||||
*s.groups = append(*s.groups, name)
|
||||
}
|
||||
}
|
||||
|
||||
// closeGroup ends the group with the given name.
|
||||
func (s *handleState) closeGroup(name string) {
|
||||
if s.h.json {
|
||||
s.buf.WriteByte('}')
|
||||
} else {
|
||||
(*s.prefix) = (*s.prefix)[:len(*s.prefix)-len(name)-1 /* for keyComponentSep */]
|
||||
}
|
||||
s.sep = s.h.attrSep()
|
||||
if s.groups != nil {
|
||||
*s.groups = (*s.groups)[:len(*s.groups)-1]
|
||||
}
|
||||
}
|
||||
|
||||
// appendAttr appends the Attr's key and value.
|
||||
// It handles replacement and checking for an empty key
|
||||
// (after replacement).
|
||||
func (s *handleState) appendAttr(a Attr) {
|
||||
if rep := s.h.opts.ReplaceAttr; rep != nil && a.Value.Kind() != KindGroup {
|
||||
var gs []string
|
||||
if s.groups != nil {
|
||||
gs = *s.groups
|
||||
}
|
||||
// Resolve before calling ReplaceAttr, so the user doesn't have to.
|
||||
a.Value = a.Value.Resolve()
|
||||
a = rep(gs, a)
|
||||
}
|
||||
a.Value = a.Value.Resolve()
|
||||
// Elide empty Attrs.
|
||||
if a.isEmpty() {
|
||||
return
|
||||
}
|
||||
// Special case: Source.
|
||||
if v := a.Value; v.Kind() == KindAny {
|
||||
if src, ok := v.Any().(*Source); ok {
|
||||
if s.h.json {
|
||||
a.Value = src.group()
|
||||
} else {
|
||||
a.Value = StringValue(fmt.Sprintf("%s:%d", src.File, src.Line))
|
||||
}
|
||||
}
|
||||
}
|
||||
if a.Value.Kind() == KindGroup {
|
||||
attrs := a.Value.Group()
|
||||
// Output only non-empty groups.
|
||||
if len(attrs) > 0 {
|
||||
// Inline a group with an empty key.
|
||||
if a.Key != "" {
|
||||
s.openGroup(a.Key)
|
||||
}
|
||||
for _, aa := range attrs {
|
||||
s.appendAttr(aa)
|
||||
}
|
||||
if a.Key != "" {
|
||||
s.closeGroup(a.Key)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
s.appendKey(a.Key)
|
||||
s.appendValue(a.Value)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *handleState) appendError(err error) {
|
||||
s.appendString(fmt.Sprintf("!ERROR:%v", err))
|
||||
}
|
||||
|
||||
func (s *handleState) appendKey(key string) {
|
||||
s.buf.WriteString(s.sep)
|
||||
if s.prefix != nil {
|
||||
// TODO: optimize by avoiding allocation.
|
||||
s.appendString(string(*s.prefix) + key)
|
||||
} else {
|
||||
s.appendString(key)
|
||||
}
|
||||
if s.h.json {
|
||||
s.buf.WriteByte(':')
|
||||
} else {
|
||||
s.buf.WriteByte('=')
|
||||
}
|
||||
s.sep = s.h.attrSep()
|
||||
}
|
||||
|
||||
func (s *handleState) appendString(str string) {
|
||||
if s.h.json {
|
||||
s.buf.WriteByte('"')
|
||||
*s.buf = appendEscapedJSONString(*s.buf, str)
|
||||
s.buf.WriteByte('"')
|
||||
} else {
|
||||
// text
|
||||
if needsQuoting(str) {
|
||||
*s.buf = strconv.AppendQuote(*s.buf, str)
|
||||
} else {
|
||||
s.buf.WriteString(str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *handleState) appendValue(v Value) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
// If it panics with a nil pointer, the most likely cases are
|
||||
// an encoding.TextMarshaler or error fails to guard against nil,
|
||||
// in which case "<nil>" seems to be the feasible choice.
|
||||
//
|
||||
// Adapted from the code in fmt/print.go.
|
||||
if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
s.appendString("<nil>")
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise just print the original panic message.
|
||||
s.appendString(fmt.Sprintf("!PANIC: %v", r))
|
||||
}
|
||||
}()
|
||||
|
||||
var err error
|
||||
if s.h.json {
|
||||
err = appendJSONValue(s, v)
|
||||
} else {
|
||||
err = appendTextValue(s, v)
|
||||
}
|
||||
if err != nil {
|
||||
s.appendError(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *handleState) appendTime(t time.Time) {
|
||||
if s.h.json {
|
||||
appendJSONTime(s, t)
|
||||
} else {
|
||||
writeTimeRFC3339Millis(s.buf, t)
|
||||
}
|
||||
}
|
||||
|
||||
// This takes half the time of Time.AppendFormat.
|
||||
func writeTimeRFC3339Millis(buf *buffer.Buffer, t time.Time) {
|
||||
year, month, day := t.Date()
|
||||
buf.WritePosIntWidth(year, 4)
|
||||
buf.WriteByte('-')
|
||||
buf.WritePosIntWidth(int(month), 2)
|
||||
buf.WriteByte('-')
|
||||
buf.WritePosIntWidth(day, 2)
|
||||
buf.WriteByte('T')
|
||||
hour, min, sec := t.Clock()
|
||||
buf.WritePosIntWidth(hour, 2)
|
||||
buf.WriteByte(':')
|
||||
buf.WritePosIntWidth(min, 2)
|
||||
buf.WriteByte(':')
|
||||
buf.WritePosIntWidth(sec, 2)
|
||||
ns := t.Nanosecond()
|
||||
buf.WriteByte('.')
|
||||
buf.WritePosIntWidth(ns/1e6, 3)
|
||||
_, offsetSeconds := t.Zone()
|
||||
if offsetSeconds == 0 {
|
||||
buf.WriteByte('Z')
|
||||
} else {
|
||||
offsetMinutes := offsetSeconds / 60
|
||||
if offsetMinutes < 0 {
|
||||
buf.WriteByte('-')
|
||||
offsetMinutes = -offsetMinutes
|
||||
} else {
|
||||
buf.WriteByte('+')
|
||||
}
|
||||
buf.WritePosIntWidth(offsetMinutes/60, 2)
|
||||
buf.WriteByte(':')
|
||||
buf.WritePosIntWidth(offsetMinutes%60, 2)
|
||||
}
|
||||
}
|
||||
84
vendor/golang.org/x/exp/slog/internal/buffer/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package buffer provides a pool-allocated byte buffer.
|
||||
package buffer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Buffer adapted from go/src/fmt/print.go
|
||||
type Buffer []byte
|
||||
|
||||
// Having an initial size gives a dramatic speedup.
|
||||
var bufPool = sync.Pool{
|
||||
New: func() any {
|
||||
b := make([]byte, 0, 1024)
|
||||
return (*Buffer)(&b)
|
||||
},
|
||||
}
|
||||
|
||||
func New() *Buffer {
|
||||
return bufPool.Get().(*Buffer)
|
||||
}
|
||||
|
||||
func (b *Buffer) Free() {
|
||||
// To reduce peak allocation, return only smaller buffers to the pool.
|
||||
const maxBufferSize = 16 << 10
|
||||
if cap(*b) <= maxBufferSize {
|
||||
*b = (*b)[:0]
|
||||
bufPool.Put(b)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Buffer) Reset() {
|
||||
*b = (*b)[:0]
|
||||
}
|
||||
|
||||
func (b *Buffer) Write(p []byte) (int, error) {
|
||||
*b = append(*b, p...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (b *Buffer) WriteString(s string) {
|
||||
*b = append(*b, s...)
|
||||
}
|
||||
|
||||
func (b *Buffer) WriteByte(c byte) {
|
||||
*b = append(*b, c)
|
||||
}
|
||||
|
||||
func (b *Buffer) WritePosInt(i int) {
|
||||
b.WritePosIntWidth(i, 0)
|
||||
}
|
||||
|
||||
// WritePosIntWidth writes non-negative integer i to the buffer, padded on the left
|
||||
// by zeroes to the given width. Use a width of 0 to omit padding.
|
||||
func (b *Buffer) WritePosIntWidth(i, width int) {
|
||||
// Cheap integer to fixed-width decimal ASCII.
|
||||
// Copied from log/log.go.
|
||||
|
||||
if i < 0 {
|
||||
panic("negative int")
|
||||
}
|
||||
|
||||
// Assemble decimal in reverse order.
|
||||
var bb [20]byte
|
||||
bp := len(bb) - 1
|
||||
for i >= 10 || width > 1 {
|
||||
width--
|
||||
q := i / 10
|
||||
bb[bp] = byte('0' + i - q*10)
|
||||
bp--
|
||||
i = q
|
||||
}
|
||||
// i < 10
|
||||
bb[bp] = byte('0' + i)
|
||||
b.Write(bb[bp:])
|
||||
}
|
||||
|
||||
func (b *Buffer) String() string {
|
||||
return string(*b)
|
||||
}
|
||||
9
vendor/golang.org/x/exp/slog/internal/ignorepc.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package internal
|
||||
|
||||
// If IgnorePC is true, do not invoke runtime.Callers to get the pc.
|
||||
// This is solely for benchmarking the slowdown from runtime.Callers.
|
||||
var IgnorePC = false
|
||||
336
vendor/golang.org/x/exp/slog/json_handler.go
generated
vendored
Normal file
@@ -0,0 +1,336 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/exp/slog/internal/buffer"
|
||||
)
|
||||
|
||||
// JSONHandler is a Handler that writes Records to an io.Writer as
|
||||
// line-delimited JSON objects.
|
||||
type JSONHandler struct {
|
||||
*commonHandler
|
||||
}
|
||||
|
||||
// NewJSONHandler creates a JSONHandler that writes to w,
|
||||
// using the given options.
|
||||
// If opts is nil, the default options are used.
|
||||
func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler {
|
||||
if opts == nil {
|
||||
opts = &HandlerOptions{}
|
||||
}
|
||||
return &JSONHandler{
|
||||
&commonHandler{
|
||||
json: true,
|
||||
w: w,
|
||||
opts: *opts,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Enabled reports whether the handler handles records at the given level.
|
||||
// The handler ignores records whose level is lower.
|
||||
func (h *JSONHandler) Enabled(_ context.Context, level Level) bool {
|
||||
return h.commonHandler.enabled(level)
|
||||
}
|
||||
|
||||
// WithAttrs returns a new JSONHandler whose attributes consist
|
||||
// of h's attributes followed by attrs.
|
||||
func (h *JSONHandler) WithAttrs(attrs []Attr) Handler {
|
||||
return &JSONHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
|
||||
}
|
||||
|
||||
func (h *JSONHandler) WithGroup(name string) Handler {
|
||||
return &JSONHandler{commonHandler: h.commonHandler.withGroup(name)}
|
||||
}
|
||||
|
||||
// Handle formats its argument Record as a JSON object on a single line.
|
||||
//
|
||||
// If the Record's time is zero, the time is omitted.
|
||||
// Otherwise, the key is "time"
|
||||
// and the value is output as with json.Marshal.
|
||||
//
|
||||
// If the Record's level is zero, the level is omitted.
|
||||
// Otherwise, the key is "level"
|
||||
// and the value of [Level.String] is output.
|
||||
//
|
||||
// If the AddSource option is set and source information is available,
|
||||
// the key is "source"
|
||||
// and the value is output as "FILE:LINE".
|
||||
//
|
||||
// The message's key is "msg".
|
||||
//
|
||||
// To modify these or other attributes, or remove them from the output, use
|
||||
// [HandlerOptions.ReplaceAttr].
|
||||
//
|
||||
// Values are formatted as with an [encoding/json.Encoder] with SetEscapeHTML(false),
|
||||
// with two exceptions.
|
||||
//
|
||||
// First, an Attr whose Value is of type error is formatted as a string, by
|
||||
// calling its Error method. Only errors in Attrs receive this special treatment,
|
||||
// not errors embedded in structs, slices, maps or other data structures that
|
||||
// are processed by the encoding/json package.
|
||||
//
|
||||
// Second, an encoding failure does not cause Handle to return an error.
|
||||
// Instead, the error message is formatted as a string.
|
||||
//
|
||||
// Each call to Handle results in a single serialized call to io.Writer.Write.
|
||||
func (h *JSONHandler) Handle(_ context.Context, r Record) error {
|
||||
return h.commonHandler.handle(r)
|
||||
}
|
||||
|
||||
// Adapted from time.Time.MarshalJSON to avoid allocation.
|
||||
func appendJSONTime(s *handleState, t time.Time) {
|
||||
if y := t.Year(); y < 0 || y >= 10000 {
|
||||
// RFC 3339 is clear that years are 4 digits exactly.
|
||||
// See golang.org/issue/4556#c15 for more discussion.
|
||||
s.appendError(errors.New("time.Time year outside of range [0,9999]"))
|
||||
}
|
||||
s.buf.WriteByte('"')
|
||||
*s.buf = t.AppendFormat(*s.buf, time.RFC3339Nano)
|
||||
s.buf.WriteByte('"')
|
||||
}
|
||||
|
||||
func appendJSONValue(s *handleState, v Value) error {
|
||||
switch v.Kind() {
|
||||
case KindString:
|
||||
s.appendString(v.str())
|
||||
case KindInt64:
|
||||
*s.buf = strconv.AppendInt(*s.buf, v.Int64(), 10)
|
||||
case KindUint64:
|
||||
*s.buf = strconv.AppendUint(*s.buf, v.Uint64(), 10)
|
||||
case KindFloat64:
|
||||
// json.Marshal is funny about floats; it doesn't
|
||||
// always match strconv.AppendFloat. So just call it.
|
||||
// That's expensive, but floats are rare.
|
||||
if err := appendJSONMarshal(s.buf, v.Float64()); err != nil {
|
||||
return err
|
||||
}
|
||||
case KindBool:
|
||||
*s.buf = strconv.AppendBool(*s.buf, v.Bool())
|
||||
case KindDuration:
|
||||
// Do what json.Marshal does.
|
||||
*s.buf = strconv.AppendInt(*s.buf, int64(v.Duration()), 10)
|
||||
case KindTime:
|
||||
s.appendTime(v.Time())
|
||||
case KindAny:
|
||||
a := v.Any()
|
||||
_, jm := a.(json.Marshaler)
|
||||
if err, ok := a.(error); ok && !jm {
|
||||
s.appendString(err.Error())
|
||||
} else {
|
||||
return appendJSONMarshal(s.buf, a)
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("bad kind: %s", v.Kind()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func appendJSONMarshal(buf *buffer.Buffer, v any) error {
|
||||
// Use a json.Encoder to avoid escaping HTML.
|
||||
var bb bytes.Buffer
|
||||
enc := json.NewEncoder(&bb)
|
||||
enc.SetEscapeHTML(false)
|
||||
if err := enc.Encode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
bs := bb.Bytes()
|
||||
buf.Write(bs[:len(bs)-1]) // remove final newline
|
||||
return nil
|
||||
}
|
||||
|
||||
// appendEscapedJSONString escapes s for JSON and appends it to buf.
|
||||
// It does not surround the string in quotation marks.
|
||||
//
|
||||
// Modified from encoding/json/encode.go:encodeState.string,
|
||||
// with escapeHTML set to false.
|
||||
func appendEscapedJSONString(buf []byte, s string) []byte {
|
||||
char := func(b byte) { buf = append(buf, b) }
|
||||
str := func(s string) { buf = append(buf, s...) }
|
||||
|
||||
start := 0
|
||||
for i := 0; i < len(s); {
|
||||
if b := s[i]; b < utf8.RuneSelf {
|
||||
if safeSet[b] {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if start < i {
|
||||
str(s[start:i])
|
||||
}
|
||||
char('\\')
|
||||
switch b {
|
||||
case '\\', '"':
|
||||
char(b)
|
||||
case '\n':
|
||||
char('n')
|
||||
case '\r':
|
||||
char('r')
|
||||
case '\t':
|
||||
char('t')
|
||||
default:
|
||||
// This encodes bytes < 0x20 except for \t, \n and \r.
|
||||
str(`u00`)
|
||||
char(hex[b>>4])
|
||||
char(hex[b&0xF])
|
||||
}
|
||||
i++
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
c, size := utf8.DecodeRuneInString(s[i:])
|
||||
if c == utf8.RuneError && size == 1 {
|
||||
if start < i {
|
||||
str(s[start:i])
|
||||
}
|
||||
str(`\ufffd`)
|
||||
i += size
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
// U+2028 is LINE SEPARATOR.
|
||||
// U+2029 is PARAGRAPH SEPARATOR.
|
||||
// They are both technically valid characters in JSON strings,
|
||||
// but don't work in JSONP, which has to be evaluated as JavaScript,
|
||||
// and can lead to security holes there. It is valid JSON to
|
||||
// escape them, so we do so unconditionally.
|
||||
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
|
||||
if c == '\u2028' || c == '\u2029' {
|
||||
if start < i {
|
||||
str(s[start:i])
|
||||
}
|
||||
str(`\u202`)
|
||||
char(hex[c&0xF])
|
||||
i += size
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
i += size
|
||||
}
|
||||
if start < len(s) {
|
||||
str(s[start:])
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
var hex = "0123456789abcdef"
|
||||
|
||||
// Copied from encoding/json/tables.go.
|
||||
//
|
||||
// safeSet holds the value true if the ASCII character with the given array
|
||||
// position can be represented inside a JSON string without any further
|
||||
// escaping.
|
||||
//
|
||||
// All values are true except for the ASCII control characters (0-31), the
|
||||
// double quote ("), and the backslash character ("\").
|
||||
var safeSet = [utf8.RuneSelf]bool{
|
||||
' ': true,
|
||||
'!': true,
|
||||
'"': false,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'(': true,
|
||||
')': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
',': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'/': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
':': true,
|
||||
';': true,
|
||||
'<': true,
|
||||
'=': true,
|
||||
'>': true,
|
||||
'?': true,
|
||||
'@': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'V': true,
|
||||
'W': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'[': true,
|
||||
'\\': false,
|
||||
']': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'{': true,
|
||||
'|': true,
|
||||
'}': true,
|
||||
'~': true,
|
||||
'\u007f': true,
|
||||
}
|
||||
201
vendor/golang.org/x/exp/slog/level.go
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// A Level is the importance or severity of a log event.
|
||||
// The higher the level, the more important or severe the event.
|
||||
type Level int
|
||||
|
||||
// Level numbers are inherently arbitrary,
|
||||
// but we picked them to satisfy three constraints.
|
||||
// Any system can map them to another numbering scheme if it wishes.
|
||||
//
|
||||
// First, we wanted the default level to be Info. Since Levels are ints, Info is
|
||||
// the default value for int, zero.
|
||||
//
|
||||
|
||||
// Second, we wanted to make it easy to use levels to specify logger verbosity.
|
||||
// Since a larger level means a more severe event, a logger that accepts events
|
||||
// with smaller (or more negative) level means a more verbose logger. Logger
|
||||
// verbosity is thus the negation of event severity, and the default verbosity
|
||||
// of 0 accepts all events at least as severe as INFO.
|
||||
//
|
||||
// Third, we wanted some room between levels to accommodate schemes with named
|
||||
// levels between ours. For example, Google Cloud Logging defines a Notice level
|
||||
// between Info and Warn. Since there are only a few of these intermediate
|
||||
// levels, the gap between the numbers need not be large. Our gap of 4 matches
|
||||
// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the
|
||||
// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog
|
||||
// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog
|
||||
// does not. But those OpenTelemetry levels can still be represented as slog
|
||||
// Levels by using the appropriate integers.
|
||||
//
|
||||
// Names for common levels.
|
||||
const (
|
||||
LevelDebug Level = -4
|
||||
LevelInfo Level = 0
|
||||
LevelWarn Level = 4
|
||||
LevelError Level = 8
|
||||
)
|
||||
|
||||
// String returns a name for the level.
|
||||
// If the level has a name, then that name
|
||||
// in uppercase is returned.
|
||||
// If the level is between named values, then
|
||||
// an integer is appended to the uppercased name.
|
||||
// Examples:
|
||||
//
|
||||
// LevelWarn.String() => "WARN"
|
||||
// (LevelInfo+2).String() => "INFO+2"
|
||||
func (l Level) String() string {
|
||||
str := func(base string, val Level) string {
|
||||
if val == 0 {
|
||||
return base
|
||||
}
|
||||
return fmt.Sprintf("%s%+d", base, val)
|
||||
}
|
||||
|
||||
switch {
|
||||
case l < LevelInfo:
|
||||
return str("DEBUG", l-LevelDebug)
|
||||
case l < LevelWarn:
|
||||
return str("INFO", l-LevelInfo)
|
||||
case l < LevelError:
|
||||
return str("WARN", l-LevelWarn)
|
||||
default:
|
||||
return str("ERROR", l-LevelError)
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalJSON implements [encoding/json.Marshaler]
|
||||
// by quoting the output of [Level.String].
|
||||
func (l Level) MarshalJSON() ([]byte, error) {
|
||||
// AppendQuote is sufficient for JSON-encoding all Level strings.
|
||||
// They don't contain any runes that would produce invalid JSON
|
||||
// when escaped.
|
||||
return strconv.AppendQuote(nil, l.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements [encoding/json.Unmarshaler].
|
||||
// It accepts any string produced by [Level.MarshalJSON],
|
||||
// ignoring case.
|
||||
// It also accepts numeric offsets that would result in a different string on
|
||||
// output. For example, "Error-8" would marshal as "INFO".
|
||||
func (l *Level) UnmarshalJSON(data []byte) error {
|
||||
s, err := strconv.Unquote(string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return l.parse(s)
|
||||
}
|
||||
|
||||
// MarshalText implements [encoding.TextMarshaler]
|
||||
// by calling [Level.String].
|
||||
func (l Level) MarshalText() ([]byte, error) {
|
||||
return []byte(l.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements [encoding.TextUnmarshaler].
|
||||
// It accepts any string produced by [Level.MarshalText],
|
||||
// ignoring case.
|
||||
// It also accepts numeric offsets that would result in a different string on
|
||||
// output. For example, "Error-8" would marshal as "INFO".
|
||||
func (l *Level) UnmarshalText(data []byte) error {
|
||||
return l.parse(string(data))
|
||||
}
|
||||
|
||||
func (l *Level) parse(s string) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = fmt.Errorf("slog: level string %q: %w", s, err)
|
||||
}
|
||||
}()
|
||||
|
||||
name := s
|
||||
offset := 0
|
||||
if i := strings.IndexAny(s, "+-"); i >= 0 {
|
||||
name = s[:i]
|
||||
offset, err = strconv.Atoi(s[i:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch strings.ToUpper(name) {
|
||||
case "DEBUG":
|
||||
*l = LevelDebug
|
||||
case "INFO":
|
||||
*l = LevelInfo
|
||||
case "WARN":
|
||||
*l = LevelWarn
|
||||
case "ERROR":
|
||||
*l = LevelError
|
||||
default:
|
||||
return errors.New("unknown name")
|
||||
}
|
||||
*l += Level(offset)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Level returns the receiver.
|
||||
// It implements Leveler.
|
||||
func (l Level) Level() Level { return l }
|
||||
|
||||
// A LevelVar is a Level variable, to allow a Handler level to change
|
||||
// dynamically.
|
||||
// It implements Leveler as well as a Set method,
|
||||
// and it is safe for use by multiple goroutines.
|
||||
// The zero LevelVar corresponds to LevelInfo.
|
||||
type LevelVar struct {
|
||||
val atomic.Int64
|
||||
}
|
||||
|
||||
// Level returns v's level.
|
||||
func (v *LevelVar) Level() Level {
|
||||
return Level(int(v.val.Load()))
|
||||
}
|
||||
|
||||
// Set sets v's level to l.
|
||||
func (v *LevelVar) Set(l Level) {
|
||||
v.val.Store(int64(l))
|
||||
}
|
||||
|
||||
func (v *LevelVar) String() string {
|
||||
return fmt.Sprintf("LevelVar(%s)", v.Level())
|
||||
}
|
||||
|
||||
// MarshalText implements [encoding.TextMarshaler]
|
||||
// by calling [Level.MarshalText].
|
||||
func (v *LevelVar) MarshalText() ([]byte, error) {
|
||||
return v.Level().MarshalText()
|
||||
}
|
||||
|
||||
// UnmarshalText implements [encoding.TextUnmarshaler]
|
||||
// by calling [Level.UnmarshalText].
|
||||
func (v *LevelVar) UnmarshalText(data []byte) error {
|
||||
var l Level
|
||||
if err := l.UnmarshalText(data); err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set(l)
|
||||
return nil
|
||||
}
|
||||
|
||||
// A Leveler provides a Level value.
|
||||
//
|
||||
// As Level itself implements Leveler, clients typically supply
|
||||
// a Level value wherever a Leveler is needed, such as in HandlerOptions.
|
||||
// Clients who need to vary the level dynamically can provide a more complex
|
||||
// Leveler implementation such as *LevelVar.
|
||||
type Leveler interface {
|
||||
Level() Level
|
||||
}
|
||||
343
vendor/golang.org/x/exp/slog/logger.go
generated
vendored
Normal file
@@ -0,0 +1,343 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/exp/slog/internal"
|
||||
)
|
||||
|
||||
var defaultLogger atomic.Value
|
||||
|
||||
func init() {
|
||||
defaultLogger.Store(New(newDefaultHandler(log.Output)))
|
||||
}
|
||||
|
||||
// Default returns the default Logger.
|
||||
func Default() *Logger { return defaultLogger.Load().(*Logger) }
|
||||
|
||||
// SetDefault makes l the default Logger.
|
||||
// After this call, output from the log package's default Logger
|
||||
// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler.
|
||||
func SetDefault(l *Logger) {
|
||||
defaultLogger.Store(l)
|
||||
// If the default's handler is a defaultHandler, then don't use a handleWriter,
|
||||
// or we'll deadlock as they both try to acquire the log default mutex.
|
||||
// The defaultHandler will use whatever the log default writer is currently
|
||||
// set to, which is correct.
|
||||
// This can occur with SetDefault(Default()).
|
||||
// See TestSetDefault.
|
||||
if _, ok := l.Handler().(*defaultHandler); !ok {
|
||||
capturePC := log.Flags()&(log.Lshortfile|log.Llongfile) != 0
|
||||
log.SetOutput(&handlerWriter{l.Handler(), LevelInfo, capturePC})
|
||||
log.SetFlags(0) // we want just the log message, no time or location
|
||||
}
|
||||
}
|
||||
|
||||
// handlerWriter is an io.Writer that calls a Handler.
|
||||
// It is used to link the default log.Logger to the default slog.Logger.
|
||||
type handlerWriter struct {
|
||||
h Handler
|
||||
level Level
|
||||
capturePC bool
|
||||
}
|
||||
|
||||
func (w *handlerWriter) Write(buf []byte) (int, error) {
|
||||
if !w.h.Enabled(context.Background(), w.level) {
|
||||
return 0, nil
|
||||
}
|
||||
var pc uintptr
|
||||
if !internal.IgnorePC && w.capturePC {
|
||||
// skip [runtime.Callers, w.Write, Logger.Output, log.Print]
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(4, pcs[:])
|
||||
pc = pcs[0]
|
||||
}
|
||||
|
||||
// Remove final newline.
|
||||
origLen := len(buf) // Report that the entire buf was written.
|
||||
if len(buf) > 0 && buf[len(buf)-1] == '\n' {
|
||||
buf = buf[:len(buf)-1]
|
||||
}
|
||||
r := NewRecord(time.Now(), w.level, string(buf), pc)
|
||||
return origLen, w.h.Handle(context.Background(), r)
|
||||
}
|
||||
|
||||
// A Logger records structured information about each call to its
|
||||
// Log, Debug, Info, Warn, and Error methods.
|
||||
// For each call, it creates a Record and passes it to a Handler.
|
||||
//
|
||||
// To create a new Logger, call [New] or a Logger method
|
||||
// that begins "With".
|
||||
type Logger struct {
|
||||
handler Handler // for structured logging
|
||||
}
|
||||
|
||||
func (l *Logger) clone() *Logger {
|
||||
c := *l
|
||||
return &c
|
||||
}
|
||||
|
||||
// Handler returns l's Handler.
|
||||
func (l *Logger) Handler() Handler { return l.handler }
|
||||
|
||||
// With returns a new Logger that includes the given arguments, converted to
|
||||
// Attrs as in [Logger.Log].
|
||||
// The Attrs will be added to each output from the Logger.
|
||||
// The new Logger shares the old Logger's context.
|
||||
// The new Logger's handler is the result of calling WithAttrs on the receiver's
|
||||
// handler.
|
||||
func (l *Logger) With(args ...any) *Logger {
|
||||
c := l.clone()
|
||||
c.handler = l.handler.WithAttrs(argsToAttrSlice(args))
|
||||
return c
|
||||
}
|
||||
|
||||
// WithGroup returns a new Logger that starts a group. The keys of all
|
||||
// attributes added to the Logger will be qualified by the given name.
|
||||
// (How that qualification happens depends on the [Handler.WithGroup]
|
||||
// method of the Logger's Handler.)
|
||||
// The new Logger shares the old Logger's context.
|
||||
//
|
||||
// The new Logger's handler is the result of calling WithGroup on the receiver's
|
||||
// handler.
|
||||
func (l *Logger) WithGroup(name string) *Logger {
|
||||
c := l.clone()
|
||||
c.handler = l.handler.WithGroup(name)
|
||||
return c
|
||||
|
||||
}
|
||||
|
||||
// New creates a new Logger with the given non-nil Handler.
|
||||
func New(h Handler) *Logger {
|
||||
if h == nil {
|
||||
panic("nil Handler")
|
||||
}
|
||||
return &Logger{handler: h}
|
||||
}
|
||||
|
||||
// With calls Logger.With on the default logger.
|
||||
func With(args ...any) *Logger {
|
||||
return Default().With(args...)
|
||||
}
|
||||
|
||||
// Enabled reports whether l emits log records at the given context and level.
|
||||
func (l *Logger) Enabled(ctx context.Context, level Level) bool {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
return l.Handler().Enabled(ctx, level)
|
||||
}
|
||||
|
||||
// NewLogLogger returns a new log.Logger such that each call to its Output method
|
||||
// dispatches a Record to the specified handler. The logger acts as a bridge from
|
||||
// the older log API to newer structured logging handlers.
|
||||
func NewLogLogger(h Handler, level Level) *log.Logger {
|
||||
return log.New(&handlerWriter{h, level, true}, "", 0)
|
||||
}
|
||||
|
||||
// Log emits a log record with the current time and the given level and message.
|
||||
// The Record's Attrs consist of the Logger's attributes followed by
|
||||
// the Attrs specified by args.
|
||||
//
|
||||
// The attribute arguments are processed as follows:
|
||||
// - If an argument is an Attr, it is used as is.
|
||||
// - If an argument is a string and this is not the last argument,
|
||||
// the following argument is treated as the value and the two are combined
|
||||
// into an Attr.
|
||||
// - Otherwise, the argument is treated as a value with key "!BADKEY".
|
||||
func (l *Logger) Log(ctx context.Context, level Level, msg string, args ...any) {
|
||||
l.log(ctx, level, msg, args...)
|
||||
}
|
||||
|
||||
// LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs.
|
||||
func (l *Logger) LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
|
||||
l.logAttrs(ctx, level, msg, attrs...)
|
||||
}
|
||||
|
||||
// Debug logs at LevelDebug.
|
||||
func (l *Logger) Debug(msg string, args ...any) {
|
||||
l.log(nil, LevelDebug, msg, args...)
|
||||
}
|
||||
|
||||
// DebugContext logs at LevelDebug with the given context.
|
||||
func (l *Logger) DebugContext(ctx context.Context, msg string, args ...any) {
|
||||
l.log(ctx, LevelDebug, msg, args...)
|
||||
}
|
||||
|
||||
// DebugCtx logs at LevelDebug with the given context.
|
||||
// Deprecated: Use Logger.DebugContext.
|
||||
func (l *Logger) DebugCtx(ctx context.Context, msg string, args ...any) {
|
||||
l.log(ctx, LevelDebug, msg, args...)
|
||||
}
|
||||
|
||||
// Info logs at LevelInfo.
|
||||
func (l *Logger) Info(msg string, args ...any) {
|
||||
l.log(nil, LevelInfo, msg, args...)
|
||||
}
|
||||
|
||||
// InfoContext logs at LevelInfo with the given context.
|
||||
func (l *Logger) InfoContext(ctx context.Context, msg string, args ...any) {
|
||||
l.log(ctx, LevelInfo, msg, args...)
|
||||
}
|
||||
|
||||
// InfoCtx logs at LevelInfo with the given context.
|
||||
// Deprecated: Use Logger.InfoContext.
|
||||
func (l *Logger) InfoCtx(ctx context.Context, msg string, args ...any) {
|
||||
l.log(ctx, LevelInfo, msg, args...)
|
||||
}
|
||||
|
||||
// Warn logs at LevelWarn.
|
||||
func (l *Logger) Warn(msg string, args ...any) {
|
||||
l.log(nil, LevelWarn, msg, args...)
|
||||
}
|
||||
|
||||
// WarnContext logs at LevelWarn with the given context.
|
||||
func (l *Logger) WarnContext(ctx context.Context, msg string, args ...any) {
|
||||
l.log(ctx, LevelWarn, msg, args...)
|
||||
}
|
||||
|
||||
// WarnCtx logs at LevelWarn with the given context.
|
||||
// Deprecated: Use Logger.WarnContext.
|
||||
func (l *Logger) WarnCtx(ctx context.Context, msg string, args ...any) {
|
||||
l.log(ctx, LevelWarn, msg, args...)
|
||||
}
|
||||
|
||||
// Error logs at LevelError.
|
||||
func (l *Logger) Error(msg string, args ...any) {
|
||||
l.log(nil, LevelError, msg, args...)
|
||||
}
|
||||
|
||||
// ErrorContext logs at LevelError with the given context.
|
||||
func (l *Logger) ErrorContext(ctx context.Context, msg string, args ...any) {
|
||||
l.log(ctx, LevelError, msg, args...)
|
||||
}
|
||||
|
||||
// ErrorCtx logs at LevelError with the given context.
|
||||
// Deprecated: Use Logger.ErrorContext.
|
||||
func (l *Logger) ErrorCtx(ctx context.Context, msg string, args ...any) {
|
||||
l.log(ctx, LevelError, msg, args...)
|
||||
}
|
||||
|
||||
// log is the low-level logging method for methods that take ...any.
|
||||
// It must always be called directly by an exported logging method
|
||||
// or function, because it uses a fixed call depth to obtain the pc.
|
||||
func (l *Logger) log(ctx context.Context, level Level, msg string, args ...any) {
|
||||
if !l.Enabled(ctx, level) {
|
||||
return
|
||||
}
|
||||
var pc uintptr
|
||||
if !internal.IgnorePC {
|
||||
var pcs [1]uintptr
|
||||
// skip [runtime.Callers, this function, this function's caller]
|
||||
runtime.Callers(3, pcs[:])
|
||||
pc = pcs[0]
|
||||
}
|
||||
r := NewRecord(time.Now(), level, msg, pc)
|
||||
r.Add(args...)
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
_ = l.Handler().Handle(ctx, r)
|
||||
}
|
||||
|
||||
// logAttrs is like [Logger.log], but for methods that take ...Attr.
|
||||
func (l *Logger) logAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
|
||||
if !l.Enabled(ctx, level) {
|
||||
return
|
||||
}
|
||||
var pc uintptr
|
||||
if !internal.IgnorePC {
|
||||
var pcs [1]uintptr
|
||||
// skip [runtime.Callers, this function, this function's caller]
|
||||
runtime.Callers(3, pcs[:])
|
||||
pc = pcs[0]
|
||||
}
|
||||
r := NewRecord(time.Now(), level, msg, pc)
|
||||
r.AddAttrs(attrs...)
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
_ = l.Handler().Handle(ctx, r)
|
||||
}
|
||||
|
||||
// Debug calls Logger.Debug on the default logger.
|
||||
func Debug(msg string, args ...any) {
|
||||
Default().log(nil, LevelDebug, msg, args...)
|
||||
}
|
||||
|
||||
// DebugContext calls Logger.DebugContext on the default logger.
|
||||
func DebugContext(ctx context.Context, msg string, args ...any) {
|
||||
Default().log(ctx, LevelDebug, msg, args...)
|
||||
}
|
||||
|
||||
// Info calls Logger.Info on the default logger.
|
||||
func Info(msg string, args ...any) {
|
||||
Default().log(nil, LevelInfo, msg, args...)
|
||||
}
|
||||
|
||||
// InfoContext calls Logger.InfoContext on the default logger.
|
||||
func InfoContext(ctx context.Context, msg string, args ...any) {
|
||||
Default().log(ctx, LevelInfo, msg, args...)
|
||||
}
|
||||
|
||||
// Warn calls Logger.Warn on the default logger.
|
||||
func Warn(msg string, args ...any) {
|
||||
Default().log(nil, LevelWarn, msg, args...)
|
||||
}
|
||||
|
||||
// WarnContext calls Logger.WarnContext on the default logger.
|
||||
func WarnContext(ctx context.Context, msg string, args ...any) {
|
||||
Default().log(ctx, LevelWarn, msg, args...)
|
||||
}
|
||||
|
||||
// Error calls Logger.Error on the default logger.
|
||||
func Error(msg string, args ...any) {
|
||||
Default().log(nil, LevelError, msg, args...)
|
||||
}
|
||||
|
||||
// ErrorContext calls Logger.ErrorContext on the default logger.
|
||||
func ErrorContext(ctx context.Context, msg string, args ...any) {
|
||||
Default().log(ctx, LevelError, msg, args...)
|
||||
}
|
||||
|
||||
// DebugCtx calls Logger.DebugContext on the default logger.
|
||||
// Deprecated: call DebugContext.
|
||||
func DebugCtx(ctx context.Context, msg string, args ...any) {
|
||||
Default().log(ctx, LevelDebug, msg, args...)
|
||||
}
|
||||
|
||||
// InfoCtx calls Logger.InfoContext on the default logger.
|
||||
// Deprecated: call InfoContext.
|
||||
func InfoCtx(ctx context.Context, msg string, args ...any) {
|
||||
Default().log(ctx, LevelInfo, msg, args...)
|
||||
}
|
||||
|
||||
// WarnCtx calls Logger.WarnContext on the default logger.
|
||||
// Deprecated: call WarnContext.
|
||||
func WarnCtx(ctx context.Context, msg string, args ...any) {
|
||||
Default().log(ctx, LevelWarn, msg, args...)
|
||||
}
|
||||
|
||||
// ErrorCtx calls Logger.ErrorContext on the default logger.
|
||||
// Deprecated: call ErrorContext.
|
||||
func ErrorCtx(ctx context.Context, msg string, args ...any) {
|
||||
Default().log(ctx, LevelError, msg, args...)
|
||||
}
|
||||
|
||||
// Log calls Logger.Log on the default logger.
|
||||
func Log(ctx context.Context, level Level, msg string, args ...any) {
|
||||
Default().log(ctx, level, msg, args...)
|
||||
}
|
||||
|
||||
// LogAttrs calls Logger.LogAttrs on the default logger.
|
||||
func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
|
||||
Default().logAttrs(ctx, level, msg, attrs...)
|
||||
}
|
||||
36
vendor/golang.org/x/exp/slog/noplog.bench
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: golang.org/x/exp/slog
|
||||
cpu: Intel(R) Xeon(R) CPU @ 2.20GHz
|
||||
BenchmarkNopLog/attrs-8 1000000 1090 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/attrs-8 1000000 1097 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/attrs-8 1000000 1078 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/attrs-8 1000000 1095 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/attrs-8 1000000 1096 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/attrs-parallel-8 4007268 308.2 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/attrs-parallel-8 4016138 299.7 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/attrs-parallel-8 4020529 305.9 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/attrs-parallel-8 3977829 303.4 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/attrs-parallel-8 3225438 318.5 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/keys-values-8 1179256 994.2 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/keys-values-8 1000000 1002 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/keys-values-8 1216710 993.2 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/keys-values-8 1000000 1013 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/keys-values-8 1000000 1016 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/WithContext-8 989066 1163 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/WithContext-8 994116 1163 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/WithContext-8 1000000 1152 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/WithContext-8 991675 1165 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/WithContext-8 965268 1166 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/WithContext-parallel-8 3955503 303.3 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/WithContext-parallel-8 3861188 307.8 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/WithContext-parallel-8 3967752 303.9 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/WithContext-parallel-8 3955203 302.7 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/WithContext-parallel-8 3948278 301.1 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/Ctx-8 940622 1247 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/Ctx-8 936381 1257 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/Ctx-8 959730 1266 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/Ctx-8 943473 1290 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkNopLog/Ctx-8 919414 1259 ns/op 0 B/op 0 allocs/op
|
||||
PASS
|
||||
ok golang.org/x/exp/slog 40.566s
|
||||
207
vendor/golang.org/x/exp/slog/record.go
generated
vendored
Normal file
@@ -0,0 +1,207 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slog
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
const nAttrsInline = 5
|
||||
|
||||
// A Record holds information about a log event.
|
||||
// Copies of a Record share state.
|
||||
// Do not modify a Record after handing out a copy to it.
|
||||
// Use [Record.Clone] to create a copy with no shared state.
|
||||
type Record struct {
|
||||
// The time at which the output method (Log, Info, etc.) was called.
|
||||
Time time.Time
|
||||
|
||||
// The log message.
|
||||
Message string
|
||||
|
||||
// The level of the event.
|
||||
Level Level
|
||||
|
||||
// The program counter at the time the record was constructed, as determined
|
||||
// by runtime.Callers. If zero, no program counter is available.
|
||||
//
|
||||
// The only valid use for this value is as an argument to
|
||||
// [runtime.CallersFrames]. In particular, it must not be passed to
|
||||
// [runtime.FuncForPC].
|
||||
PC uintptr
|
||||
|
||||
// Allocation optimization: an inline array sized to hold
|
||||
// the majority of log calls (based on examination of open-source
|
||||
// code). It holds the start of the list of Attrs.
|
||||
front [nAttrsInline]Attr
|
||||
|
||||
// The number of Attrs in front.
|
||||
nFront int
|
||||
|
||||
// The list of Attrs except for those in front.
|
||||
// Invariants:
|
||||
// - len(back) > 0 iff nFront == len(front)
|
||||
// - Unused array elements are zero. Used to detect mistakes.
|
||||
back []Attr
|
||||
}
|
||||
|
||||
// NewRecord creates a Record from the given arguments.
|
||||
// Use [Record.AddAttrs] to add attributes to the Record.
|
||||
//
|
||||
// NewRecord is intended for logging APIs that want to support a [Handler] as
|
||||
// a backend.
|
||||
func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record {
|
||||
return Record{
|
||||
Time: t,
|
||||
Message: msg,
|
||||
Level: level,
|
||||
PC: pc,
|
||||
}
|
||||
}
|
||||
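As a hedged illustration of the "logging APIs that want to support a Handler as a backend" use case mentioned above (the package and helper names are assumptions, not part of this commit):

package mylog // hypothetical bridge package

import (
	"context"
	"time"

	"golang.org/x/exp/slog"
)

// LogTo forwards a message and attributes from a custom logging API
// to any slog.Handler, using NewRecord and AddAttrs as suggested above.
func LogTo(h slog.Handler, level slog.Level, msg string, attrs ...slog.Attr) error {
	r := slog.NewRecord(time.Now(), level, msg, 0) // pc=0: no source location recorded
	r.AddAttrs(attrs...)
	return h.Handle(context.Background(), r)
}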
|
||||
// Clone returns a copy of the record with no shared state.
|
||||
// The original record and the clone can both be modified
|
||||
// without interfering with each other.
|
||||
func (r Record) Clone() Record {
|
||||
r.back = slices.Clip(r.back) // prevent append from mutating shared array
|
||||
return r
|
||||
}
|
||||
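A brief assumed example of why Clone matters when a handler retains records (memHandler is hypothetical and not part of this change):

package memlog // hypothetical package

import (
	"context"

	"golang.org/x/exp/slog"
)

// memHandler stores records in memory; the embedded Handler supplies
// the remaining interface methods.
type memHandler struct {
	slog.Handler
	records []slog.Record
}

func (h *memHandler) Handle(_ context.Context, r slog.Record) error {
	// Clone before retaining: copies of a Record share state, so storing r
	// directly could be affected by the caller's later modifications.
	h.records = append(h.records, r.Clone())
	return nil
}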
|
||||
// NumAttrs returns the number of attributes in the Record.
|
||||
func (r Record) NumAttrs() int {
|
||||
return r.nFront + len(r.back)
|
||||
}
|
||||
|
||||
// Attrs calls f on each Attr in the Record.
|
||||
// Iteration stops if f returns false.
|
||||
func (r Record) Attrs(f func(Attr) bool) {
|
||||
for i := 0; i < r.nFront; i++ {
|
||||
if !f(r.front[i]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, a := range r.back {
|
||||
if !f(a) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
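A small assumed sketch of the early-exit iteration contract above (helper name and package are illustrative only):

package recutil // hypothetical helper package

import "golang.org/x/exp/slog"

// firstAttr returns the first attribute whose key matches, relying on the
// Record.Attrs contract that returning false stops iteration.
func firstAttr(r slog.Record, key string) (slog.Attr, bool) {
	var found slog.Attr
	ok := false
	r.Attrs(func(a slog.Attr) bool {
		if a.Key == key {
			found, ok = a, true
			return false // stop iterating
		}
		return true // keep going
	})
	return found, ok
}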
|
||||
// AddAttrs appends the given Attrs to the Record's list of Attrs.
|
||||
func (r *Record) AddAttrs(attrs ...Attr) {
|
||||
n := copy(r.front[r.nFront:], attrs)
|
||||
r.nFront += n
|
||||
// Check if a copy was modified by slicing past the end
|
||||
// and seeing if the Attr there is non-zero.
|
||||
if cap(r.back) > len(r.back) {
|
||||
end := r.back[:len(r.back)+1][len(r.back)]
|
||||
if !end.isEmpty() {
|
||||
panic("copies of a slog.Record were both modified")
|
||||
}
|
||||
}
|
||||
r.back = append(r.back, attrs[n:]...)
|
||||
}
|
||||
|
||||
// Add converts the args to Attrs as described in [Logger.Log],
|
||||
// then appends the Attrs to the Record's list of Attrs.
|
||||
func (r *Record) Add(args ...any) {
|
||||
var a Attr
|
||||
for len(args) > 0 {
|
||||
a, args = argsToAttr(args)
|
||||
if r.nFront < len(r.front) {
|
||||
r.front[r.nFront] = a
|
||||
r.nFront++
|
||||
} else {
|
||||
if r.back == nil {
|
||||
r.back = make([]Attr, 0, countAttrs(args))
|
||||
}
|
||||
r.back = append(r.back, a)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// countAttrs returns the number of Attrs that would be created from args.
|
||||
func countAttrs(args []any) int {
|
||||
n := 0
|
||||
for i := 0; i < len(args); i++ {
|
||||
n++
|
||||
if _, ok := args[i].(string); ok {
|
||||
i++
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
const badKey = "!BADKEY"
|
||||
|
||||
// argsToAttr turns a prefix of the nonempty args slice into an Attr
|
||||
// and returns the unconsumed portion of the slice.
|
||||
// If args[0] is an Attr, it returns it.
|
||||
// If args[0] is a string, it treats the first two elements as
|
||||
// a key-value pair.
|
||||
// Otherwise, it treats args[0] as a value with a missing key.
|
||||
func argsToAttr(args []any) (Attr, []any) {
|
||||
switch x := args[0].(type) {
|
||||
case string:
|
||||
if len(args) == 1 {
|
||||
return String(badKey, x), nil
|
||||
}
|
||||
return Any(x, args[1]), args[2:]
|
||||
|
||||
case Attr:
|
||||
return x, args[1:]
|
||||
|
||||
default:
|
||||
return Any(badKey, x), args[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// Source describes the location of a line of source code.
|
||||
type Source struct {
|
||||
// Function is the package path-qualified function name containing the
|
||||
// source line. If non-empty, this string uniquely identifies a single
|
||||
// function in the program. This may be the empty string if not known.
|
||||
Function string `json:"function"`
|
||||
// File and Line are the file name and line number (1-based) of the source
|
||||
// line. These may be the empty string and zero, respectively, if not known.
|
||||
File string `json:"file"`
|
||||
Line int `json:"line"`
|
||||
}
|
||||
|
||||
// group returns the non-zero fields of s as a group Value.
|
||||
// It is similar to a LogValue method, but we don't want Source
|
||||
// to implement LogValuer because it would be resolved before
|
||||
// the ReplaceAttr function was called.
|
||||
func (s *Source) group() Value {
|
||||
var as []Attr
|
||||
if s.Function != "" {
|
||||
as = append(as, String("function", s.Function))
|
||||
}
|
||||
if s.File != "" {
|
||||
as = append(as, String("file", s.File))
|
||||
}
|
||||
if s.Line != 0 {
|
||||
as = append(as, Int("line", s.Line))
|
||||
}
|
||||
return GroupValue(as...)
|
||||
}
|
||||
|
||||
// source returns a Source for the log event.
|
||||
// If the Record was created without the necessary information,
|
||||
// or if the location is unavailable, it returns a non-nil *Source
|
||||
// with zero fields.
|
||||
func (r Record) source() *Source {
|
||||
fs := runtime.CallersFrames([]uintptr{r.PC})
|
||||
f, _ := fs.Next()
|
||||
return &Source{
|
||||
Function: f.Function,
|
||||
File: f.File,
|
||||
Line: f.Line,
|
||||
}
|
||||
}
|
||||
161
vendor/golang.org/x/exp/slog/text_handler.go
generated
vendored
Normal file
@@ -0,0 +1,161 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// TextHandler is a Handler that writes Records to an io.Writer as a
|
||||
// sequence of key=value pairs separated by spaces and followed by a newline.
|
||||
type TextHandler struct {
|
||||
*commonHandler
|
||||
}
|
||||
|
||||
// NewTextHandler creates a TextHandler that writes to w,
|
||||
// using the given options.
|
||||
// If opts is nil, the default options are used.
|
||||
func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler {
|
||||
if opts == nil {
|
||||
opts = &HandlerOptions{}
|
||||
}
|
||||
return &TextHandler{
|
||||
&commonHandler{
|
||||
json: false,
|
||||
w: w,
|
||||
opts: *opts,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Enabled reports whether the handler handles records at the given level.
|
||||
// The handler ignores records whose level is lower.
|
||||
func (h *TextHandler) Enabled(_ context.Context, level Level) bool {
|
||||
return h.commonHandler.enabled(level)
|
||||
}
|
||||
|
||||
// WithAttrs returns a new TextHandler whose attributes consists
|
||||
// of h's attributes followed by attrs.
|
||||
func (h *TextHandler) WithAttrs(attrs []Attr) Handler {
|
||||
return &TextHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
|
||||
}
|
||||
|
||||
func (h *TextHandler) WithGroup(name string) Handler {
|
||||
return &TextHandler{commonHandler: h.commonHandler.withGroup(name)}
|
||||
}
|
||||
|
||||
// Handle formats its argument Record as a single line of space-separated
|
||||
// key=value items.
|
||||
//
|
||||
// If the Record's time is zero, the time is omitted.
|
||||
// Otherwise, the key is "time"
|
||||
// and the value is output in RFC3339 format with millisecond precision.
|
||||
//
|
||||
// If the Record's level is zero, the level is omitted.
|
||||
// Otherwise, the key is "level"
|
||||
// and the value of [Level.String] is output.
|
||||
//
|
||||
// If the AddSource option is set and source information is available,
|
||||
// the key is "source" and the value is output as FILE:LINE.
|
||||
//
|
||||
// The message's key is "msg".
|
||||
//
|
||||
// To modify these or other attributes, or remove them from the output, use
|
||||
// [HandlerOptions.ReplaceAttr].
|
||||
//
|
||||
// If a value implements [encoding.TextMarshaler], the result of MarshalText is
|
||||
// written. Otherwise, the result of fmt.Sprint is written.
|
||||
//
|
||||
// Keys and values are quoted with [strconv.Quote] if they contain Unicode space
|
||||
// characters, non-printing characters, '"' or '='.
|
||||
//
|
||||
// Keys inside groups consist of components (keys or group names) separated by
|
||||
// dots. No further escaping is performed.
|
||||
// Thus there is no way to determine from the key "a.b.c" whether there
|
||||
// are two groups "a" and "b" and a key "c", or a single group "a.b" and a key "c",
|
||||
// or a single group "a" and a key "b.c".
|
||||
// If it is necessary to reconstruct the group structure of a key
|
||||
// even in the presence of dots inside components, use
|
||||
// [HandlerOptions.ReplaceAttr] to encode that information in the key.
|
||||
//
|
||||
// Each call to Handle results in a single serialized call to
|
||||
// io.Writer.Write.
|
||||
func (h *TextHandler) Handle(_ context.Context, r Record) error {
|
||||
return h.commonHandler.handle(r)
|
||||
}
|
||||
|
||||
func appendTextValue(s *handleState, v Value) error {
|
||||
switch v.Kind() {
|
||||
case KindString:
|
||||
s.appendString(v.str())
|
||||
case KindTime:
|
||||
s.appendTime(v.time())
|
||||
case KindAny:
|
||||
if tm, ok := v.any.(encoding.TextMarshaler); ok {
|
||||
data, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: avoid the conversion to string.
|
||||
s.appendString(string(data))
|
||||
return nil
|
||||
}
|
||||
if bs, ok := byteSlice(v.any); ok {
|
||||
// As of Go 1.19, this only allocates for strings longer than 32 bytes.
|
||||
s.buf.WriteString(strconv.Quote(string(bs)))
|
||||
return nil
|
||||
}
|
||||
s.appendString(fmt.Sprintf("%+v", v.Any()))
|
||||
default:
|
||||
*s.buf = v.append(*s.buf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// byteSlice returns its argument as a []byte if the argument's
|
||||
// underlying type is []byte, along with a second return value of true.
|
||||
// Otherwise it returns nil, false.
|
||||
func byteSlice(a any) ([]byte, bool) {
|
||||
if bs, ok := a.([]byte); ok {
|
||||
return bs, true
|
||||
}
|
||||
// Like Printf's %s, we allow both the slice type and the byte element type to be named.
|
||||
t := reflect.TypeOf(a)
|
||||
if t != nil && t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
|
||||
return reflect.ValueOf(a).Bytes(), true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func needsQuoting(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return true
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
b := s[i]
|
||||
if b < utf8.RuneSelf {
|
||||
// Quote anything except a backslash that would need quoting in a
|
||||
// JSON string, as well as space and '='
|
||||
if b != '\\' && (b == ' ' || b == '=' || !safeSet[b]) {
|
||||
return true
|
||||
}
|
||||
i++
|
||||
continue
|
||||
}
|
||||
r, size := utf8.DecodeRuneInString(s[i:])
|
||||
if r == utf8.RuneError || unicode.IsSpace(r) || !unicode.IsPrint(r) {
|
||||
return true
|
||||
}
|
||||
i += size
|
||||
}
|
||||
return false
|
||||
}
|
||||
456
vendor/golang.org/x/exp/slog/value.go
generated
vendored
Normal file
@@ -0,0 +1,456 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package slog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// A Value can represent any Go value, but unlike type any,
|
||||
// it can represent most small values without an allocation.
|
||||
// The zero Value corresponds to nil.
|
||||
type Value struct {
|
||||
_ [0]func() // disallow ==
|
||||
// num holds the value for Kinds Int64, Uint64, Float64, Bool and Duration,
|
||||
// the string length for KindString, and nanoseconds since the epoch for KindTime.
|
||||
num uint64
|
||||
// If any is of type Kind, then the value is in num as described above.
|
||||
// If any is of type *time.Location, then the Kind is Time and time.Time value
|
||||
// can be constructed from the Unix nanos in num and the location (monotonic time
|
||||
// is not preserved).
|
||||
// If any is of type stringptr, then the Kind is String and the string value
|
||||
// consists of the length in num and the pointer in any.
|
||||
// Otherwise, the Kind is Any and any is the value.
|
||||
// (This implies that Attrs cannot store values of type Kind, *time.Location
|
||||
// or stringptr.)
|
||||
any any
|
||||
}
|
||||
|
||||
// Kind is the kind of a Value.
|
||||
type Kind int
|
||||
|
||||
// The following list is sorted alphabetically, but it's also important that
|
||||
// KindAny is 0 so that a zero Value represents nil.
|
||||
|
||||
const (
|
||||
KindAny Kind = iota
|
||||
KindBool
|
||||
KindDuration
|
||||
KindFloat64
|
||||
KindInt64
|
||||
KindString
|
||||
KindTime
|
||||
KindUint64
|
||||
KindGroup
|
||||
KindLogValuer
|
||||
)
|
||||
|
||||
var kindStrings = []string{
|
||||
"Any",
|
||||
"Bool",
|
||||
"Duration",
|
||||
"Float64",
|
||||
"Int64",
|
||||
"String",
|
||||
"Time",
|
||||
"Uint64",
|
||||
"Group",
|
||||
"LogValuer",
|
||||
}
|
||||
|
||||
func (k Kind) String() string {
|
||||
if k >= 0 && int(k) < len(kindStrings) {
|
||||
return kindStrings[k]
|
||||
}
|
||||
return "<unknown slog.Kind>"
|
||||
}
|
||||
|
||||
// Unexported version of Kind, just so we can store Kinds in Values.
|
||||
// (No user-provided value has this type.)
|
||||
type kind Kind
|
||||
|
||||
// Kind returns v's Kind.
|
||||
func (v Value) Kind() Kind {
|
||||
switch x := v.any.(type) {
|
||||
case Kind:
|
||||
return x
|
||||
case stringptr:
|
||||
return KindString
|
||||
case timeLocation:
|
||||
return KindTime
|
||||
case groupptr:
|
||||
return KindGroup
|
||||
case LogValuer:
|
||||
return KindLogValuer
|
||||
case kind: // a kind is just a wrapper for a Kind
|
||||
return KindAny
|
||||
default:
|
||||
return KindAny
|
||||
}
|
||||
}
|
||||
|
||||
//////////////// Constructors
|
||||
|
||||
// IntValue returns a Value for an int.
|
||||
func IntValue(v int) Value {
|
||||
return Int64Value(int64(v))
|
||||
}
|
||||
|
||||
// Int64Value returns a Value for an int64.
|
||||
func Int64Value(v int64) Value {
|
||||
return Value{num: uint64(v), any: KindInt64}
|
||||
}
|
||||
|
||||
// Uint64Value returns a Value for a uint64.
|
||||
func Uint64Value(v uint64) Value {
|
||||
return Value{num: v, any: KindUint64}
|
||||
}
|
||||
|
||||
// Float64Value returns a Value for a floating-point number.
|
||||
func Float64Value(v float64) Value {
|
||||
return Value{num: math.Float64bits(v), any: KindFloat64}
|
||||
}
|
||||
|
||||
// BoolValue returns a Value for a bool.
|
||||
func BoolValue(v bool) Value {
|
||||
u := uint64(0)
|
||||
if v {
|
||||
u = 1
|
||||
}
|
||||
return Value{num: u, any: KindBool}
|
||||
}
|
||||
|
||||
// Unexported version of *time.Location, just so we can store *time.Locations in
|
||||
// Values. (No user-provided value has this type.)
|
||||
type timeLocation *time.Location
|
||||
|
||||
// TimeValue returns a Value for a time.Time.
|
||||
// It discards the monotonic portion.
|
||||
func TimeValue(v time.Time) Value {
|
||||
if v.IsZero() {
|
||||
// UnixNano on the zero time is undefined, so represent the zero time
|
||||
// with a nil *time.Location instead. time.Time.Location method never
|
||||
// returns nil, so a Value with any == timeLocation(nil) cannot be
|
||||
// mistaken for any other Value, time.Time or otherwise.
|
||||
return Value{any: timeLocation(nil)}
|
||||
}
|
||||
return Value{num: uint64(v.UnixNano()), any: timeLocation(v.Location())}
|
||||
}
|
||||
|
||||
// DurationValue returns a Value for a time.Duration.
|
||||
func DurationValue(v time.Duration) Value {
|
||||
return Value{num: uint64(v.Nanoseconds()), any: KindDuration}
|
||||
}
|
||||
|
||||
// AnyValue returns a Value for the supplied value.
|
||||
//
|
||||
// If the supplied value is of type Value, it is returned
|
||||
// unmodified.
|
||||
//
|
||||
// Given a value of one of Go's predeclared string, bool, or
|
||||
// (non-complex) numeric types, AnyValue returns a Value of kind
|
||||
// String, Bool, Uint64, Int64, or Float64. The width of the
|
||||
// original numeric type is not preserved.
|
||||
//
|
||||
// Given a time.Time or time.Duration value, AnyValue returns a Value of kind
|
||||
// KindTime or KindDuration. The monotonic time is not preserved.
|
||||
//
|
||||
// For nil, or values of all other types, including named types whose
|
||||
// underlying type is numeric, AnyValue returns a value of kind KindAny.
|
||||
func AnyValue(v any) Value {
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
return StringValue(v)
|
||||
case int:
|
||||
return Int64Value(int64(v))
|
||||
case uint:
|
||||
return Uint64Value(uint64(v))
|
||||
case int64:
|
||||
return Int64Value(v)
|
||||
case uint64:
|
||||
return Uint64Value(v)
|
||||
case bool:
|
||||
return BoolValue(v)
|
||||
case time.Duration:
|
||||
return DurationValue(v)
|
||||
case time.Time:
|
||||
return TimeValue(v)
|
||||
case uint8:
|
||||
return Uint64Value(uint64(v))
|
||||
case uint16:
|
||||
return Uint64Value(uint64(v))
|
||||
case uint32:
|
||||
return Uint64Value(uint64(v))
|
||||
case uintptr:
|
||||
return Uint64Value(uint64(v))
|
||||
case int8:
|
||||
return Int64Value(int64(v))
|
||||
case int16:
|
||||
return Int64Value(int64(v))
|
||||
case int32:
|
||||
return Int64Value(int64(v))
|
||||
case float64:
|
||||
return Float64Value(v)
|
||||
case float32:
|
||||
return Float64Value(float64(v))
|
||||
case []Attr:
|
||||
return GroupValue(v...)
|
||||
case Kind:
|
||||
return Value{any: kind(v)}
|
||||
case Value:
|
||||
return v
|
||||
default:
|
||||
return Value{any: v}
|
||||
}
|
||||
}
|
||||
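For orientation, an assumed demonstration of the kind mapping described above (the printed kinds are expectations, not output captured from this commit):

package main

import (
	"fmt"
	"time"

	"golang.org/x/exp/slog"
)

func main() {
	fmt.Println(slog.AnyValue(int32(7)).Kind())        // Int64: the original width is not preserved
	fmt.Println(slog.AnyValue(uint16(7)).Kind())       // Uint64
	fmt.Println(slog.AnyValue("hi").Kind())            // String
	fmt.Println(slog.AnyValue(2 * time.Second).Kind()) // Duration
	type myInt int
	fmt.Println(slog.AnyValue(myInt(7)).Kind()) // Any: named numeric types fall through
}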
|
||||
//////////////// Accessors
|
||||
|
||||
// Any returns v's value as an any.
|
||||
func (v Value) Any() any {
|
||||
switch v.Kind() {
|
||||
case KindAny:
|
||||
if k, ok := v.any.(kind); ok {
|
||||
return Kind(k)
|
||||
}
|
||||
return v.any
|
||||
case KindLogValuer:
|
||||
return v.any
|
||||
case KindGroup:
|
||||
return v.group()
|
||||
case KindInt64:
|
||||
return int64(v.num)
|
||||
case KindUint64:
|
||||
return v.num
|
||||
case KindFloat64:
|
||||
return v.float()
|
||||
case KindString:
|
||||
return v.str()
|
||||
case KindBool:
|
||||
return v.bool()
|
||||
case KindDuration:
|
||||
return v.duration()
|
||||
case KindTime:
|
||||
return v.time()
|
||||
default:
|
||||
panic(fmt.Sprintf("bad kind: %s", v.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// Int64 returns v's value as an int64. It panics
|
||||
// if v is not a signed integer.
|
||||
func (v Value) Int64() int64 {
|
||||
if g, w := v.Kind(), KindInt64; g != w {
|
||||
panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
|
||||
}
|
||||
return int64(v.num)
|
||||
}
|
||||
|
||||
// Uint64 returns v's value as a uint64. It panics
|
||||
// if v is not an unsigned integer.
|
||||
func (v Value) Uint64() uint64 {
|
||||
if g, w := v.Kind(), KindUint64; g != w {
|
||||
panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
|
||||
}
|
||||
return v.num
|
||||
}
|
||||
|
||||
// Bool returns v's value as a bool. It panics
|
||||
// if v is not a bool.
|
||||
func (v Value) Bool() bool {
|
||||
if g, w := v.Kind(), KindBool; g != w {
|
||||
panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
|
||||
}
|
||||
return v.bool()
|
||||
}
|
||||
|
||||
func (v Value) bool() bool {
|
||||
return v.num == 1
|
||||
}
|
||||
|
||||
// Duration returns v's value as a time.Duration. It panics
|
||||
// if v is not a time.Duration.
|
||||
func (v Value) Duration() time.Duration {
|
||||
if g, w := v.Kind(), KindDuration; g != w {
|
||||
panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
|
||||
}
|
||||
|
||||
return v.duration()
|
||||
}
|
||||
|
||||
func (v Value) duration() time.Duration {
|
||||
return time.Duration(int64(v.num))
|
||||
}
|
||||
|
||||
// Float64 returns v's value as a float64. It panics
|
||||
// if v is not a float64.
|
||||
func (v Value) Float64() float64 {
|
||||
if g, w := v.Kind(), KindFloat64; g != w {
|
||||
panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
|
||||
}
|
||||
|
||||
return v.float()
|
||||
}
|
||||
|
||||
func (v Value) float() float64 {
|
||||
return math.Float64frombits(v.num)
|
||||
}
|
||||
|
||||
// Time returns v's value as a time.Time. It panics
|
||||
// if v is not a time.Time.
|
||||
func (v Value) Time() time.Time {
|
||||
if g, w := v.Kind(), KindTime; g != w {
|
||||
panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
|
||||
}
|
||||
return v.time()
|
||||
}
|
||||
|
||||
func (v Value) time() time.Time {
|
||||
loc := v.any.(timeLocation)
|
||||
if loc == nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return time.Unix(0, int64(v.num)).In(loc)
|
||||
}
|
||||
|
||||
// LogValuer returns v's value as a LogValuer. It panics
|
||||
// if v is not a LogValuer.
|
||||
func (v Value) LogValuer() LogValuer {
|
||||
return v.any.(LogValuer)
|
||||
}
|
||||
|
||||
// Group returns v's value as a []Attr.
|
||||
// It panics if v's Kind is not KindGroup.
|
||||
func (v Value) Group() []Attr {
|
||||
if sp, ok := v.any.(groupptr); ok {
|
||||
return unsafe.Slice((*Attr)(sp), v.num)
|
||||
}
|
||||
panic("Group: bad kind")
|
||||
}
|
||||
|
||||
func (v Value) group() []Attr {
|
||||
return unsafe.Slice((*Attr)(v.any.(groupptr)), v.num)
|
||||
}
|
||||
|
||||
//////////////// Other
|
||||
|
||||
// Equal reports whether v and w represent the same Go value.
|
||||
func (v Value) Equal(w Value) bool {
|
||||
k1 := v.Kind()
|
||||
k2 := w.Kind()
|
||||
if k1 != k2 {
|
||||
return false
|
||||
}
|
||||
switch k1 {
|
||||
case KindInt64, KindUint64, KindBool, KindDuration:
|
||||
return v.num == w.num
|
||||
case KindString:
|
||||
return v.str() == w.str()
|
||||
case KindFloat64:
|
||||
return v.float() == w.float()
|
||||
case KindTime:
|
||||
return v.time().Equal(w.time())
|
||||
case KindAny, KindLogValuer:
|
||||
return v.any == w.any // may panic if non-comparable
|
||||
case KindGroup:
|
||||
return slices.EqualFunc(v.group(), w.group(), Attr.Equal)
|
||||
default:
|
||||
panic(fmt.Sprintf("bad kind: %s", k1))
|
||||
}
|
||||
}
|
||||
|
||||
// append appends a text representation of v to dst.
|
||||
// v is formatted as with fmt.Sprint.
|
||||
func (v Value) append(dst []byte) []byte {
|
||||
switch v.Kind() {
|
||||
case KindString:
|
||||
return append(dst, v.str()...)
|
||||
case KindInt64:
|
||||
return strconv.AppendInt(dst, int64(v.num), 10)
|
||||
case KindUint64:
|
||||
return strconv.AppendUint(dst, v.num, 10)
|
||||
case KindFloat64:
|
||||
return strconv.AppendFloat(dst, v.float(), 'g', -1, 64)
|
||||
case KindBool:
|
||||
return strconv.AppendBool(dst, v.bool())
|
||||
case KindDuration:
|
||||
return append(dst, v.duration().String()...)
|
||||
case KindTime:
|
||||
return append(dst, v.time().String()...)
|
||||
case KindGroup:
|
||||
return fmt.Append(dst, v.group())
|
||||
case KindAny, KindLogValuer:
|
||||
return fmt.Append(dst, v.any)
|
||||
default:
|
||||
panic(fmt.Sprintf("bad kind: %s", v.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// A LogValuer is any Go value that can convert itself into a Value for logging.
|
||||
//
|
||||
// This mechanism may be used to defer expensive operations until they are
|
||||
// needed, or to expand a single value into a sequence of components.
|
||||
type LogValuer interface {
|
||||
LogValue() Value
|
||||
}
|
||||
|
||||
const maxLogValues = 100
|
||||
|
||||
// Resolve repeatedly calls LogValue on v while it implements LogValuer,
|
||||
// and returns the result.
|
||||
// If v resolves to a group, the group's attributes' values are not recursively
|
||||
// resolved.
|
||||
// If the number of LogValue calls exceeds a threshold, a Value containing an
|
||||
// error is returned.
|
||||
// Resolve's return value is guaranteed not to be of Kind KindLogValuer.
|
||||
func (v Value) Resolve() (rv Value) {
|
||||
orig := v
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
rv = AnyValue(fmt.Errorf("LogValue panicked\n%s", stack(3, 5)))
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < maxLogValues; i++ {
|
||||
if v.Kind() != KindLogValuer {
|
||||
return v
|
||||
}
|
||||
v = v.LogValuer().LogValue()
|
||||
}
|
||||
err := fmt.Errorf("LogValue called too many times on Value of type %T", orig.Any())
|
||||
return AnyValue(err)
|
||||
}
|
||||
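An assumed example of the LogValuer/Resolve mechanism described above (the token type is hypothetical):

package main

import (
	"fmt"

	"golang.org/x/exp/slog"
)

// token is a hypothetical secret-bearing type that redacts itself when logged.
type token string

func (token) LogValue() slog.Value { return slog.StringValue("REDACTED") }

func main() {
	v := slog.AnyValue(token("s3cr3t"))
	fmt.Println(v.Kind())             // LogValuer
	fmt.Println(v.Resolve().String()) // REDACTED
}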
|
||||
func stack(skip, nFrames int) string {
|
||||
pcs := make([]uintptr, nFrames+1)
|
||||
n := runtime.Callers(skip+1, pcs)
|
||||
if n == 0 {
|
||||
return "(no stack)"
|
||||
}
|
||||
frames := runtime.CallersFrames(pcs[:n])
|
||||
var b strings.Builder
|
||||
i := 0
|
||||
for {
|
||||
frame, more := frames.Next()
|
||||
fmt.Fprintf(&b, "called from %s (%s:%d)\n", frame.Function, frame.File, frame.Line)
|
||||
if !more {
|
||||
break
|
||||
}
|
||||
i++
|
||||
if i >= nFrames {
|
||||
fmt.Fprintf(&b, "(rest of stack elided)\n")
|
||||
break
|
||||
}
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
53
vendor/golang.org/x/exp/slog/value_119.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.19 && !go1.20
|
||||
|
||||
package slog
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type (
|
||||
stringptr unsafe.Pointer // used in Value.any when the Value is a string
|
||||
groupptr unsafe.Pointer // used in Value.any when the Value is a []Attr
|
||||
)
|
||||
|
||||
// StringValue returns a new Value for a string.
|
||||
func StringValue(value string) Value {
|
||||
hdr := (*reflect.StringHeader)(unsafe.Pointer(&value))
|
||||
return Value{num: uint64(hdr.Len), any: stringptr(hdr.Data)}
|
||||
}
|
||||
|
||||
func (v Value) str() string {
|
||||
var s string
|
||||
hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
|
||||
hdr.Data = uintptr(v.any.(stringptr))
|
||||
hdr.Len = int(v.num)
|
||||
return s
|
||||
}
|
||||
|
||||
// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
|
||||
// the methods Int64, Float64, and so on, which panic if v is of the
|
||||
// wrong kind, String never panics.
|
||||
func (v Value) String() string {
|
||||
if sp, ok := v.any.(stringptr); ok {
|
||||
// Inlining this code makes a huge difference.
|
||||
var s string
|
||||
hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
|
||||
hdr.Data = uintptr(sp)
|
||||
hdr.Len = int(v.num)
|
||||
return s
|
||||
}
|
||||
return string(v.append(nil))
|
||||
}
|
||||
|
||||
// GroupValue returns a new Value for a list of Attrs.
|
||||
// The caller must not subsequently mutate the argument slice.
|
||||
func GroupValue(as ...Attr) Value {
|
||||
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&as))
|
||||
return Value{num: uint64(hdr.Len), any: groupptr(hdr.Data)}
|
||||
}
|
||||
39
vendor/golang.org/x/exp/slog/value_120.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.20
|
||||
|
||||
package slog
|
||||
|
||||
import "unsafe"
|
||||
|
||||
type (
|
||||
stringptr *byte // used in Value.any when the Value is a string
|
||||
groupptr *Attr // used in Value.any when the Value is a []Attr
|
||||
)
|
||||
|
||||
// StringValue returns a new Value for a string.
|
||||
func StringValue(value string) Value {
|
||||
return Value{num: uint64(len(value)), any: stringptr(unsafe.StringData(value))}
|
||||
}
|
||||
|
||||
// GroupValue returns a new Value for a list of Attrs.
|
||||
// The caller must not subsequently mutate the argument slice.
|
||||
func GroupValue(as ...Attr) Value {
|
||||
return Value{num: uint64(len(as)), any: groupptr(unsafe.SliceData(as))}
|
||||
}
|
||||
|
||||
// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
|
||||
// the methods Int64, Float64, and so on, which panic if v is of the
|
||||
// wrong kind, String never panics.
|
||||
func (v Value) String() string {
|
||||
if sp, ok := v.any.(stringptr); ok {
|
||||
return unsafe.String(sp, v.num)
|
||||
}
|
||||
return string(v.append(nil))
|
||||
}
|
||||
|
||||
func (v Value) str() string {
|
||||
return unsafe.String(v.any.(stringptr), v.num)
|
||||
}
|
||||
5
vendor/golang.org/x/image/webp/decode.go
generated
vendored
@@ -39,6 +39,7 @@ func decode(r io.Reader, configOnly bool) (image.Image, image.Config, error) {
|
||||
alpha []byte
|
||||
alphaStride int
|
||||
wantAlpha bool
|
||||
seenVP8X bool
|
||||
widthMinusOne uint32
|
||||
heightMinusOne uint32
|
||||
buf [10]byte
|
||||
@@ -113,6 +114,10 @@ func decode(r io.Reader, configOnly bool) (image.Image, image.Config, error) {
|
||||
return m, image.Config{}, err
|
||||
|
||||
case fccVP8X:
|
||||
if seenVP8X {
|
||||
return nil, image.Config{}, errInvalidFormat
|
||||
}
|
||||
seenVP8X = true
|
||||
if chunkLen != 10 {
|
||||
return nil, image.Config{}, errInvalidFormat
|
||||
}
|
||||
|
||||
401
vendor/golang.org/x/mod/semver/semver.go
generated
vendored
@@ -1,401 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package semver implements comparison of semantic version strings.
|
||||
// In this package, semantic version strings must begin with a leading "v",
|
||||
// as in "v1.0.0".
|
||||
//
|
||||
// The general form of a semantic version string accepted by this package is
|
||||
//
|
||||
// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
|
||||
//
|
||||
// where square brackets indicate optional parts of the syntax;
|
||||
// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
|
||||
// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
|
||||
// using only alphanumeric characters and hyphens; and
|
||||
// all-numeric PRERELEASE identifiers must not have leading zeros.
|
||||
//
|
||||
// This package follows Semantic Versioning 2.0.0 (see semver.org)
|
||||
// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
|
||||
// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
|
||||
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
|
||||
package semver
|
||||
|
||||
import "sort"
|
||||
|
||||
// parsed returns the parsed form of a semantic version string.
|
||||
type parsed struct {
|
||||
major string
|
||||
minor string
|
||||
patch string
|
||||
short string
|
||||
prerelease string
|
||||
build string
|
||||
}
|
||||
|
||||
// IsValid reports whether v is a valid semantic version string.
|
||||
func IsValid(v string) bool {
|
||||
_, ok := parse(v)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Canonical returns the canonical formatting of the semantic version v.
|
||||
// It fills in any missing .MINOR or .PATCH and discards build metadata.
|
||||
// Two semantic versions compare equal only if their canonical formattings
|
||||
// are identical strings.
|
||||
// The canonical invalid semantic version is the empty string.
|
||||
func Canonical(v string) string {
|
||||
p, ok := parse(v)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
if p.build != "" {
|
||||
return v[:len(v)-len(p.build)]
|
||||
}
|
||||
if p.short != "" {
|
||||
return v + p.short
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Major returns the major version prefix of the semantic version v.
|
||||
// For example, Major("v2.1.0") == "v2".
|
||||
// If v is an invalid semantic version string, Major returns the empty string.
|
||||
func Major(v string) string {
|
||||
pv, ok := parse(v)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return v[:1+len(pv.major)]
|
||||
}
|
||||
|
||||
// MajorMinor returns the major.minor version prefix of the semantic version v.
|
||||
// For example, MajorMinor("v2.1.0") == "v2.1".
|
||||
// If v is an invalid semantic version string, MajorMinor returns the empty string.
|
||||
func MajorMinor(v string) string {
|
||||
pv, ok := parse(v)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
i := 1 + len(pv.major)
|
||||
if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
|
||||
return v[:j]
|
||||
}
|
||||
return v[:i] + "." + pv.minor
|
||||
}
|
||||
|
||||
// Prerelease returns the prerelease suffix of the semantic version v.
|
||||
// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
|
||||
// If v is an invalid semantic version string, Prerelease returns the empty string.
|
||||
func Prerelease(v string) string {
|
||||
pv, ok := parse(v)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return pv.prerelease
|
||||
}
|
||||
|
||||
// Build returns the build suffix of the semantic version v.
|
||||
// For example, Build("v2.1.0+meta") == "+meta".
|
||||
// If v is an invalid semantic version string, Build returns the empty string.
|
||||
func Build(v string) string {
|
||||
pv, ok := parse(v)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return pv.build
|
||||
}
|
||||
|
||||
// Compare returns an integer comparing two versions according to
|
||||
// semantic version precedence.
|
||||
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
|
||||
//
|
||||
// An invalid semantic version string is considered less than a valid one.
|
||||
// All invalid semantic version strings compare equal to each other.
|
||||
func Compare(v, w string) int {
|
||||
pv, ok1 := parse(v)
|
||||
pw, ok2 := parse(w)
|
||||
if !ok1 && !ok2 {
|
||||
return 0
|
||||
}
|
||||
if !ok1 {
|
||||
return -1
|
||||
}
|
||||
if !ok2 {
|
||||
return +1
|
||||
}
|
||||
if c := compareInt(pv.major, pw.major); c != 0 {
|
||||
return c
|
||||
}
|
||||
if c := compareInt(pv.minor, pw.minor); c != 0 {
|
||||
return c
|
||||
}
|
||||
if c := compareInt(pv.patch, pw.patch); c != 0 {
|
||||
return c
|
||||
}
|
||||
return comparePrerelease(pv.prerelease, pw.prerelease)
|
||||
}
|
||||
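Although this vendored copy of the package is removed by the change, a brief assumed reminder of the Compare semantics documented above (results are expectations, not captured output):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.Compare("v1.2.3", "v1.2.4"))       // -1
	fmt.Println(semver.Compare("v1.0.0-alpha", "v1.0.0")) // -1: a prerelease sorts lower
	fmt.Println(semver.Compare("not-semver", "v0.0.1"))   // -1: invalid compares lower than valid
	fmt.Println(semver.Compare("bad", "also-bad"))        // 0: invalid versions compare equal
}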
|
||||
// Max canonicalizes its arguments and then returns the version string
|
||||
// that compares greater.
|
||||
//
|
||||
// Deprecated: use Compare instead. In most cases, returning a canonicalized
|
||||
// version is not expected or desired.
|
||||
func Max(v, w string) string {
|
||||
v = Canonical(v)
|
||||
w = Canonical(w)
|
||||
if Compare(v, w) > 0 {
|
||||
return v
|
||||
}
|
||||
return w
|
||||
}
|
||||
|
||||
// ByVersion implements sort.Interface for sorting semantic version strings.
|
||||
type ByVersion []string
|
||||
|
||||
func (vs ByVersion) Len() int { return len(vs) }
|
||||
func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
|
||||
func (vs ByVersion) Less(i, j int) bool {
|
||||
cmp := Compare(vs[i], vs[j])
|
||||
if cmp != 0 {
|
||||
return cmp < 0
|
||||
}
|
||||
return vs[i] < vs[j]
|
||||
}
|
||||
|
||||
// Sort sorts a list of semantic version strings using ByVersion.
|
||||
func Sort(list []string) {
|
||||
sort.Sort(ByVersion(list))
|
||||
}
|
||||
|
||||
func parse(v string) (p parsed, ok bool) {
|
||||
if v == "" || v[0] != 'v' {
|
||||
return
|
||||
}
|
||||
p.major, v, ok = parseInt(v[1:])
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if v == "" {
|
||||
p.minor = "0"
|
||||
p.patch = "0"
|
||||
p.short = ".0.0"
|
||||
return
|
||||
}
|
||||
if v[0] != '.' {
|
||||
ok = false
|
||||
return
|
||||
}
|
||||
p.minor, v, ok = parseInt(v[1:])
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if v == "" {
|
||||
p.patch = "0"
|
||||
p.short = ".0"
|
||||
return
|
||||
}
|
||||
if v[0] != '.' {
|
||||
ok = false
|
||||
return
|
||||
}
|
||||
p.patch, v, ok = parseInt(v[1:])
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if len(v) > 0 && v[0] == '-' {
|
||||
p.prerelease, v, ok = parsePrerelease(v)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
if len(v) > 0 && v[0] == '+' {
|
||||
p.build, v, ok = parseBuild(v)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
if v != "" {
|
||||
ok = false
|
||||
return
|
||||
}
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
func parseInt(v string) (t, rest string, ok bool) {
|
||||
if v == "" {
|
||||
return
|
||||
}
|
||||
if v[0] < '0' || '9' < v[0] {
|
||||
return
|
||||
}
|
||||
i := 1
|
||||
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
|
||||
i++
|
||||
}
|
||||
if v[0] == '0' && i != 1 {
|
||||
return
|
||||
}
|
||||
return v[:i], v[i:], true
|
||||
}
|
||||
|
||||
func parsePrerelease(v string) (t, rest string, ok bool) {
|
||||
// "A pre-release version MAY be denoted by appending a hyphen and
|
||||
// a series of dot separated identifiers immediately following the patch version.
|
||||
// Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
|
||||
// Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
|
||||
if v == "" || v[0] != '-' {
|
||||
return
|
||||
}
|
||||
i := 1
|
||||
start := 1
|
||||
for i < len(v) && v[i] != '+' {
|
||||
if !isIdentChar(v[i]) && v[i] != '.' {
|
||||
return
|
||||
}
|
||||
if v[i] == '.' {
|
||||
if start == i || isBadNum(v[start:i]) {
|
||||
return
|
||||
}
|
||||
start = i + 1
|
||||
}
|
||||
i++
|
||||
}
|
||||
if start == i || isBadNum(v[start:i]) {
|
||||
return
|
||||
}
|
||||
return v[:i], v[i:], true
|
||||
}
|
||||
|
||||
func parseBuild(v string) (t, rest string, ok bool) {
|
||||
if v == "" || v[0] != '+' {
|
||||
return
|
||||
}
|
||||
i := 1
|
||||
start := 1
|
||||
for i < len(v) {
|
||||
if !isIdentChar(v[i]) && v[i] != '.' {
|
||||
return
|
||||
}
|
||||
if v[i] == '.' {
|
||||
if start == i {
|
||||
return
|
||||
}
|
||||
start = i + 1
|
||||
}
|
||||
i++
|
||||
}
|
||||
if start == i {
|
||||
return
|
||||
}
|
||||
return v[:i], v[i:], true
|
||||
}
|
||||
|
||||
func isIdentChar(c byte) bool {
|
||||
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
|
||||
}
|
||||
|
||||
func isBadNum(v string) bool {
|
||||
i := 0
|
||||
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
|
||||
i++
|
||||
}
|
||||
return i == len(v) && i > 1 && v[0] == '0'
|
||||
}
|
||||
|
||||
func isNum(v string) bool {
|
||||
i := 0
|
||||
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
|
||||
i++
|
||||
}
|
||||
return i == len(v)
|
||||
}
|
||||
|
||||
func compareInt(x, y string) int {
|
||||
if x == y {
|
||||
return 0
|
||||
}
|
||||
if len(x) < len(y) {
|
||||
return -1
|
||||
}
|
||||
if len(x) > len(y) {
|
||||
return +1
|
||||
}
|
||||
if x < y {
|
||||
return -1
|
||||
} else {
|
||||
return +1
|
||||
}
|
||||
}
|
||||
|
||||
func comparePrerelease(x, y string) int {
|
||||
// "When major, minor, and patch are equal, a pre-release version has
|
||||
// lower precedence than a normal version.
|
||||
// Example: 1.0.0-alpha < 1.0.0.
|
||||
// Precedence for two pre-release versions with the same major, minor,
|
||||
// and patch version MUST be determined by comparing each dot separated
|
||||
// identifier from left to right until a difference is found as follows:
|
||||
// identifiers consisting of only digits are compared numerically and
|
||||
// identifiers with letters or hyphens are compared lexically in ASCII
|
||||
// sort order. Numeric identifiers always have lower precedence than
|
||||
// non-numeric identifiers. A larger set of pre-release fields has a
|
||||
// higher precedence than a smaller set, if all of the preceding
|
||||
// identifiers are equal.
|
||||
// Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
|
||||
// 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
|
||||
if x == y {
|
||||
return 0
|
||||
}
|
||||
if x == "" {
|
||||
return +1
|
||||
}
|
||||
if y == "" {
|
||||
return -1
|
||||
}
|
||||
for x != "" && y != "" {
|
||||
x = x[1:] // skip - or .
|
||||
y = y[1:] // skip - or .
|
||||
var dx, dy string
|
||||
dx, x = nextIdent(x)
|
||||
dy, y = nextIdent(y)
|
||||
if dx != dy {
|
||||
ix := isNum(dx)
|
||||
iy := isNum(dy)
|
||||
if ix != iy {
|
||||
if ix {
|
||||
return -1
|
||||
} else {
|
||||
return +1
|
||||
}
|
||||
}
|
||||
if ix {
|
||||
if len(dx) < len(dy) {
|
||||
return -1
|
||||
}
|
||||
if len(dx) > len(dy) {
|
||||
return +1
|
||||
}
|
||||
}
|
||||
if dx < dy {
|
||||
return -1
|
||||
} else {
|
||||
return +1
|
||||
}
|
||||
}
|
||||
}
|
||||
if x == "" {
|
||||
return -1
|
||||
} else {
|
||||
return +1
|
||||
}
|
||||
}
|
||||
|
||||
func nextIdent(x string) (dx, rest string) {
|
||||
i := 0
|
||||
for i < len(x) && x[i] != '.' {
|
||||
i++
|
||||
}
|
||||
return x[:i], x[i:]
|
||||
}
|
||||
56
vendor/golang.org/x/net/context/context.go
generated
vendored
@@ -1,56 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package context defines the Context type, which carries deadlines,
|
||||
// cancelation signals, and other request-scoped values across API boundaries
|
||||
// and between processes.
|
||||
// As of Go 1.7 this package is available in the standard library under the
|
||||
// name context. https://golang.org/pkg/context.
|
||||
//
|
||||
// Incoming requests to a server should create a Context, and outgoing calls to
|
||||
// servers should accept a Context. The chain of function calls between must
|
||||
// propagate the Context, optionally replacing it with a modified copy created
|
||||
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
|
||||
//
|
||||
// Programs that use Contexts should follow these rules to keep interfaces
|
||||
// consistent across packages and enable static analysis tools to check context
|
||||
// propagation:
|
||||
//
|
||||
// Do not store Contexts inside a struct type; instead, pass a Context
|
||||
// explicitly to each function that needs it. The Context should be the first
|
||||
// parameter, typically named ctx:
|
||||
//
|
||||
// func DoSomething(ctx context.Context, arg Arg) error {
|
||||
// // ... use ctx ...
|
||||
// }
|
||||
//
|
||||
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
|
||||
// if you are unsure about which Context to use.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
// APIs, not for passing optional parameters to functions.
|
||||
//
|
||||
// The same Context may be passed to functions running in different goroutines;
|
||||
// Contexts are safe for simultaneous use by multiple goroutines.
|
||||
//
|
||||
// See http://blog.golang.org/context for example code for a server that uses
|
||||
// Contexts.
|
||||
package context // import "golang.org/x/net/context"
|
||||
|
||||
// Background returns a non-nil, empty Context. It is never canceled, has no
|
||||
// values, and has no deadline. It is typically used by the main function,
|
||||
// initialization, and tests, and as the top-level Context for incoming
|
||||
// requests.
|
||||
func Background() Context {
|
||||
return background
|
||||
}
|
||||
|
||||
// TODO returns a non-nil, empty Context. Code should use context.TODO when
|
||||
// it's unclear which Context to use or it is not yet available (because the
|
||||
// surrounding function has not yet been extended to accept a Context
|
||||
// parameter). TODO is recognized by static analysis tools that determine
|
||||
// whether Contexts are propagated correctly in a program.
|
||||
func TODO() Context {
|
||||
return todo
|
||||
}
|
||||
73
vendor/golang.org/x/net/context/go17.go
generated
vendored
@@ -1,73 +0,0 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"context" // standard library's context, as of Go 1.7
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
todo = context.TODO()
|
||||
background = context.Background()
|
||||
)
|
||||
|
||||
// Canceled is the error returned by Context.Err when the context is canceled.
|
||||
var Canceled = context.Canceled
|
||||
|
||||
// DeadlineExceeded is the error returned by Context.Err when the context's
|
||||
// deadline passes.
|
||||
var DeadlineExceeded = context.DeadlineExceeded
|
||||
|
||||
// WithCancel returns a copy of parent with a new Done channel. The returned
|
||||
// context's Done channel is closed when the returned cancel function is called
|
||||
// or when the parent context's Done channel is closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
||||
ctx, f := context.WithCancel(parent)
|
||||
return ctx, f
|
||||
}
|
||||
|
||||
// WithDeadline returns a copy of the parent context with the deadline adjusted
|
||||
// to be no later than d. If the parent's deadline is already earlier than d,
|
||||
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
|
||||
// context's Done channel is closed when the deadline expires, when the returned
|
||||
// cancel function is called, or when the parent context's Done channel is
|
||||
// closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
|
||||
ctx, f := context.WithDeadline(parent, deadline)
|
||||
return ctx, f
|
||||
}
|
||||
|
||||
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete:
|
||||
//
|
||||
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
|
||||
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
// defer cancel() // releases resources if slowOperation completes before timeout elapses
|
||||
// return slowOperation(ctx)
|
||||
// }
|
||||
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
|
||||
return WithDeadline(parent, time.Now().Add(timeout))
|
||||
}
|
||||
|
||||
// WithValue returns a copy of parent in which the value associated with key is
|
||||
// val.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
// APIs, not for passing optional parameters to functions.
|
||||
func WithValue(parent Context, key interface{}, val interface{}) Context {
|
||||
return context.WithValue(parent, key, val)
|
||||
}
|
||||
21
vendor/golang.org/x/net/context/go19.go
generated
vendored
@@ -1,21 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.9
// +build go1.9

package context

import "context" // standard library's context, as of Go 1.7

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc
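
For readers unfamiliar with this package: on Go 1.7+ these wrappers simply forward to the standard library, and on Go 1.9+ (the file above) Context and CancelFunc are type aliases, so values flow freely between golang.org/x/net/context and the stdlib. A minimal usage sketch, not part of the vendored code:

package main

import (
	"fmt"
	"time"

	netctx "golang.org/x/net/context" // aliases the standard library on modern Go
)

func main() {
	// WithTimeout here delegates to context.WithTimeout from the stdlib.
	ctx, cancel := netctx.WithTimeout(netctx.Background(), 50*time.Millisecond)
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err()) // context deadline exceeded
}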
301
vendor/golang.org/x/net/context/pre_go17.go
generated
vendored
301
vendor/golang.org/x/net/context/pre_go17.go
generated
vendored
@@ -1,301 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
|
||||
// struct{}, since vars of this type must have distinct addresses.
|
||||
type emptyCtx int
|
||||
|
||||
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func (*emptyCtx) Done() <-chan struct{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Err() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Value(key interface{}) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *emptyCtx) String() string {
|
||||
switch e {
|
||||
case background:
|
||||
return "context.Background"
|
||||
case todo:
|
||||
return "context.TODO"
|
||||
}
|
||||
return "unknown empty Context"
|
||||
}
|
||||
|
||||
var (
|
||||
background = new(emptyCtx)
|
||||
todo = new(emptyCtx)
|
||||
)
|
||||
|
||||
// Canceled is the error returned by Context.Err when the context is canceled.
|
||||
var Canceled = errors.New("context canceled")
|
||||
|
||||
// DeadlineExceeded is the error returned by Context.Err when the context's
|
||||
// deadline passes.
|
||||
var DeadlineExceeded = errors.New("context deadline exceeded")
|
||||
|
||||
// WithCancel returns a copy of parent with a new Done channel. The returned
|
||||
// context's Done channel is closed when the returned cancel function is called
|
||||
// or when the parent context's Done channel is closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
||||
c := newCancelCtx(parent)
|
||||
propagateCancel(parent, c)
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
|
||||
// newCancelCtx returns an initialized cancelCtx.
|
||||
func newCancelCtx(parent Context) *cancelCtx {
|
||||
return &cancelCtx{
|
||||
Context: parent,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// propagateCancel arranges for child to be canceled when parent is.
|
||||
func propagateCancel(parent Context, child canceler) {
|
||||
if parent.Done() == nil {
|
||||
return // parent is never canceled
|
||||
}
|
||||
if p, ok := parentCancelCtx(parent); ok {
|
||||
p.mu.Lock()
|
||||
if p.err != nil {
|
||||
// parent has already been canceled
|
||||
child.cancel(false, p.err)
|
||||
} else {
|
||||
if p.children == nil {
|
||||
p.children = make(map[canceler]bool)
|
||||
}
|
||||
p.children[child] = true
|
||||
}
|
||||
p.mu.Unlock()
|
||||
} else {
|
||||
go func() {
|
||||
select {
|
||||
case <-parent.Done():
|
||||
child.cancel(false, parent.Err())
|
||||
case <-child.Done():
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// parentCancelCtx follows a chain of parent references until it finds a
|
||||
// *cancelCtx. This function understands how each of the concrete types in this
|
||||
// package represents its parent.
|
||||
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
|
||||
for {
|
||||
switch c := parent.(type) {
|
||||
case *cancelCtx:
|
||||
return c, true
|
||||
case *timerCtx:
|
||||
return c.cancelCtx, true
|
||||
case *valueCtx:
|
||||
parent = c.Context
|
||||
default:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// removeChild removes a context from its parent.
|
||||
func removeChild(parent Context, child canceler) {
|
||||
p, ok := parentCancelCtx(parent)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
p.mu.Lock()
|
||||
if p.children != nil {
|
||||
delete(p.children, child)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// A canceler is a context type that can be canceled directly. The
|
||||
// implementations are *cancelCtx and *timerCtx.
|
||||
type canceler interface {
|
||||
cancel(removeFromParent bool, err error)
|
||||
Done() <-chan struct{}
|
||||
}
|
||||
|
||||
// A cancelCtx can be canceled. When canceled, it also cancels any children
|
||||
// that implement canceler.
|
||||
type cancelCtx struct {
|
||||
Context
|
||||
|
||||
done chan struct{} // closed by the first cancel call.
|
||||
|
||||
mu sync.Mutex
|
||||
children map[canceler]bool // set to nil by the first cancel call
|
||||
err error // set to non-nil by the first cancel call
|
||||
}
|
||||
|
||||
func (c *cancelCtx) Done() <-chan struct{} {
|
||||
return c.done
|
||||
}
|
||||
|
||||
func (c *cancelCtx) Err() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.err
|
||||
}
|
||||
|
||||
func (c *cancelCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithCancel", c.Context)
|
||||
}
|
||||
|
||||
// cancel closes c.done, cancels each of c's children, and, if
|
||||
// removeFromParent is true, removes c from its parent's children.
|
||||
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
|
||||
if err == nil {
|
||||
panic("context: internal error: missing cancel error")
|
||||
}
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return // already canceled
|
||||
}
|
||||
c.err = err
|
||||
close(c.done)
|
||||
for child := range c.children {
|
||||
// NOTE: acquiring the child's lock while holding parent's lock.
|
||||
child.cancel(false, err)
|
||||
}
|
||||
c.children = nil
|
||||
c.mu.Unlock()
|
||||
|
||||
if removeFromParent {
|
||||
removeChild(c.Context, c)
|
||||
}
|
||||
}
|
||||
|
||||
// WithDeadline returns a copy of the parent context with the deadline adjusted
|
||||
// to be no later than d. If the parent's deadline is already earlier than d,
|
||||
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
|
||||
// context's Done channel is closed when the deadline expires, when the returned
|
||||
// cancel function is called, or when the parent context's Done channel is
|
||||
// closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
|
||||
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
|
||||
// The current deadline is already sooner than the new one.
|
||||
return WithCancel(parent)
|
||||
}
|
||||
c := &timerCtx{
|
||||
cancelCtx: newCancelCtx(parent),
|
||||
deadline: deadline,
|
||||
}
|
||||
propagateCancel(parent, c)
|
||||
d := deadline.Sub(time.Now())
|
||||
if d <= 0 {
|
||||
c.cancel(true, DeadlineExceeded) // deadline has already passed
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.err == nil {
|
||||
c.timer = time.AfterFunc(d, func() {
|
||||
c.cancel(true, DeadlineExceeded)
|
||||
})
|
||||
}
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
|
||||
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
|
||||
// implement Done and Err. It implements cancel by stopping its timer then
|
||||
// delegating to cancelCtx.cancel.
|
||||
type timerCtx struct {
|
||||
*cancelCtx
|
||||
timer *time.Timer // Under cancelCtx.mu.
|
||||
|
||||
deadline time.Time
|
||||
}
|
||||
|
||||
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return c.deadline, true
|
||||
}
|
||||
|
||||
func (c *timerCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
|
||||
}
|
||||
|
||||
func (c *timerCtx) cancel(removeFromParent bool, err error) {
|
||||
c.cancelCtx.cancel(false, err)
|
||||
if removeFromParent {
|
||||
// Remove this timerCtx from its parent cancelCtx's children.
|
||||
removeChild(c.cancelCtx.Context, c)
|
||||
}
|
||||
c.mu.Lock()
|
||||
if c.timer != nil {
|
||||
c.timer.Stop()
|
||||
c.timer = nil
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete:
|
||||
//
|
||||
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
|
||||
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
// defer cancel() // releases resources if slowOperation completes before timeout elapses
|
||||
// return slowOperation(ctx)
|
||||
// }
|
||||
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
|
||||
return WithDeadline(parent, time.Now().Add(timeout))
|
||||
}
|
||||
|
||||
// WithValue returns a copy of parent in which the value associated with key is
|
||||
// val.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
// APIs, not for passing optional parameters to functions.
|
||||
func WithValue(parent Context, key interface{}, val interface{}) Context {
|
||||
return &valueCtx{parent, key, val}
|
||||
}
|
||||
|
||||
// A valueCtx carries a key-value pair. It implements Value for that key and
|
||||
// delegates all other calls to the embedded Context.
|
||||
type valueCtx struct {
|
||||
Context
|
||||
key, val interface{}
|
||||
}
|
||||
|
||||
func (c *valueCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
|
||||
}
|
||||
|
||||
func (c *valueCtx) Value(key interface{}) interface{} {
|
||||
if c.key == key {
|
||||
return c.val
|
||||
}
|
||||
return c.Context.Value(key)
|
||||
}
|
||||
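
The file removed above is the pre-Go 1.7 fallback implementation; propagateCancel is the piece that wires a child's cancellation to its parent. The standard library, which this package now defers to, provides the same behaviour. A short stdlib-only illustration of that propagation:

package main

import (
	"context"
	"fmt"
)

func main() {
	parent, cancel := context.WithCancel(context.Background())
	child, childCancel := context.WithCancel(parent)
	defer childCancel()

	cancel() // cancel the parent ...
	<-child.Done()
	fmt.Println(child.Err()) // ... and the child reports context.Canceled
}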
110
vendor/golang.org/x/net/context/pre_go19.go
generated
vendored
110
vendor/golang.org/x/net/context/pre_go19.go
generated
vendored
@@ -1,110 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.9
|
||||
// +build !go1.9
|
||||
|
||||
package context
|
||||
|
||||
import "time"
|
||||
|
||||
// A Context carries a deadline, a cancelation signal, and other values across
|
||||
// API boundaries.
|
||||
//
|
||||
// Context's methods may be called by multiple goroutines simultaneously.
|
||||
type Context interface {
|
||||
// Deadline returns the time when work done on behalf of this context
|
||||
// should be canceled. Deadline returns ok==false when no deadline is
|
||||
// set. Successive calls to Deadline return the same results.
|
||||
Deadline() (deadline time.Time, ok bool)
|
||||
|
||||
// Done returns a channel that's closed when work done on behalf of this
|
||||
// context should be canceled. Done may return nil if this context can
|
||||
// never be canceled. Successive calls to Done return the same value.
|
||||
//
|
||||
// WithCancel arranges for Done to be closed when cancel is called;
|
||||
// WithDeadline arranges for Done to be closed when the deadline
|
||||
// expires; WithTimeout arranges for Done to be closed when the timeout
|
||||
// elapses.
|
||||
//
|
||||
// Done is provided for use in select statements:
|
||||
//
|
||||
// // Stream generates values with DoSomething and sends them to out
|
||||
// // until DoSomething returns an error or ctx.Done is closed.
|
||||
// func Stream(ctx context.Context, out chan<- Value) error {
|
||||
// for {
|
||||
// v, err := DoSomething(ctx)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// return ctx.Err()
|
||||
// case out <- v:
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// See http://blog.golang.org/pipelines for more examples of how to use
|
||||
// a Done channel for cancelation.
|
||||
Done() <-chan struct{}
|
||||
|
||||
// Err returns a non-nil error value after Done is closed. Err returns
|
||||
// Canceled if the context was canceled or DeadlineExceeded if the
|
||||
// context's deadline passed. No other values for Err are defined.
|
||||
// After Done is closed, successive calls to Err return the same value.
|
||||
Err() error
|
||||
|
||||
// Value returns the value associated with this context for key, or nil
|
||||
// if no value is associated with key. Successive calls to Value with
|
||||
// the same key returns the same result.
|
||||
//
|
||||
// Use context values only for request-scoped data that transits
|
||||
// processes and API boundaries, not for passing optional parameters to
|
||||
// functions.
|
||||
//
|
||||
// A key identifies a specific value in a Context. Functions that wish
|
||||
// to store values in Context typically allocate a key in a global
|
||||
// variable then use that key as the argument to context.WithValue and
|
||||
// Context.Value. A key can be any type that supports equality;
|
||||
// packages should define keys as an unexported type to avoid
|
||||
// collisions.
|
||||
//
|
||||
// Packages that define a Context key should provide type-safe accessors
|
||||
// for the values stores using that key:
|
||||
//
|
||||
// // Package user defines a User type that's stored in Contexts.
|
||||
// package user
|
||||
//
|
||||
// import "golang.org/x/net/context"
|
||||
//
|
||||
// // User is the type of value stored in the Contexts.
|
||||
// type User struct {...}
|
||||
//
|
||||
// // key is an unexported type for keys defined in this package.
|
||||
// // This prevents collisions with keys defined in other packages.
|
||||
// type key int
|
||||
//
|
||||
// // userKey is the key for user.User values in Contexts. It is
|
||||
// // unexported; clients use user.NewContext and user.FromContext
|
||||
// // instead of using this key directly.
|
||||
// var userKey key = 0
|
||||
//
|
||||
// // NewContext returns a new Context that carries value u.
|
||||
// func NewContext(ctx context.Context, u *User) context.Context {
|
||||
// return context.WithValue(ctx, userKey, u)
|
||||
// }
|
||||
//
|
||||
// // FromContext returns the User value stored in ctx, if any.
|
||||
// func FromContext(ctx context.Context) (*User, bool) {
|
||||
// u, ok := ctx.Value(userKey).(*User)
|
||||
// return u, ok
|
||||
// }
|
||||
Value(key interface{}) interface{}
|
||||
}
|
||||
|
||||
// A CancelFunc tells an operation to abandon its work.
|
||||
// A CancelFunc does not wait for the work to stop.
|
||||
// After the first call, subsequent calls to a CancelFunc do nothing.
|
||||
type CancelFunc func()
|
||||
2
vendor/golang.org/x/net/html/doc.go
generated
vendored
2
vendor/golang.org/x/net/html/doc.go
generated
vendored
@@ -104,7 +104,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML
parsing specification respectively. While the tokenizer parses and normalizes
individual HTML tokens, only the parser constructs the DOM tree from the
tokenized HTML, as described in the tree construction stage of the
specification, dynamically modifying or extending the docuemnt's DOM tree.
specification, dynamically modifying or extending the document's DOM tree.

If your use case requires semantically well-formed HTML documents, as defined by
the WHATWG specification, the parser should be used rather than the tokenizer.

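
The doc comment touched above draws the tokenizer/parser distinction; a small sketch of both entry points of golang.org/x/net/html (illustrative usage, not part of this change):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	const src = "<p>Hello, <b>world</b></p>"

	// Tokenizer: a flat stream of tokens, no DOM tree is built.
	z := html.NewTokenizer(strings.NewReader(src))
	for {
		if z.Next() == html.ErrorToken {
			break // io.EOF or a real error
		}
		fmt.Println(z.Token())
	}

	// Parser: builds the full, well-formed document tree.
	doc, err := html.Parse(strings.NewReader(src))
	if err != nil {
		panic(err)
	}
	fmt.Println(doc.FirstChild.Data) // "html"
}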
12
vendor/golang.org/x/net/html/token.go
generated
vendored
12
vendor/golang.org/x/net/html/token.go
generated
vendored
@@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() {
|
||||
return
|
||||
}
|
||||
switch c {
|
||||
case ' ', '\n', '\r', '\t', '\f', '/':
|
||||
z.pendingAttr[0].end = z.raw.end - 1
|
||||
return
|
||||
case '=':
|
||||
if z.pendingAttr[0].start+1 == z.raw.end {
|
||||
// WHATWG 13.2.5.32, if we see an equals sign before the attribute name
|
||||
@@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() {
|
||||
continue
|
||||
}
|
||||
fallthrough
|
||||
case '>':
|
||||
case ' ', '\n', '\r', '\t', '\f', '/', '>':
|
||||
// WHATWG 13.2.5.33 Attribute name state
|
||||
// We need to reconsume the char in the after attribute name state to support the / character
|
||||
z.raw.end--
|
||||
z.pendingAttr[0].end = z.raw.end
|
||||
return
|
||||
@@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() {
|
||||
if z.err != nil {
|
||||
return
|
||||
}
|
||||
if c == '/' {
|
||||
// WHATWG 13.2.5.34 After attribute name state
|
||||
// U+002F SOLIDUS (/) - Switch to the self-closing start tag state.
|
||||
return
|
||||
}
|
||||
if c != '=' {
|
||||
z.raw.end--
|
||||
return
|
||||
|
||||
13
vendor/golang.org/x/net/http/httpguts/httplex.go
generated
vendored
13
vendor/golang.org/x/net/http/httpguts/httplex.go
generated
vendored
@@ -12,7 +12,7 @@ import (
"golang.org/x/net/idna"
)

var isTokenTable = [127]bool{
var isTokenTable = [256]bool{
'!': true,
'#': true,
'$': true,
@@ -93,12 +93,7 @@ var isTokenTable = [127]bool{
}

func IsTokenRune(r rune) bool {
i := int(r)
return i < len(isTokenTable) && isTokenTable[i]
}

func isNotToken(r rune) bool {
return !IsTokenRune(r)
return r < utf8.RuneSelf && isTokenTable[byte(r)]
}

// HeaderValuesContainsToken reports whether any string in values
@@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool {
if len(v) == 0 {
return false
}
for _, r := range v {
if !IsTokenRune(r) {
for i := 0; i < len(v); i++ {
if !isTokenTable[v[i]] {
return false
}
}

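
The hunk above widens isTokenTable to 256 entries so validation can index by byte with no bounds check or rune decoding. A standalone sketch of the same table-lookup idea (identifiers here are illustrative, not the vendored ones):

package main

import "fmt"

var tokenTable = [256]bool{
	'!': true, '#': true, '$': true, '%': true, '&': true, '\'': true,
	'*': true, '+': true, '-': true, '.': true, '^': true, '_': true,
	'`': true, '|': true, '~': true,
}

func init() {
	// Letters and digits are also token characters.
	for c := '0'; c <= '9'; c++ {
		tokenTable[c] = true
	}
	for c := 'a'; c <= 'z'; c++ {
		tokenTable[c] = true
		tokenTable[c-'a'+'A'] = true
	}
}

func validHeaderFieldName(v string) bool {
	if len(v) == 0 {
		return false
	}
	for i := 0; i < len(v); i++ {
		if !tokenTable[v[i]] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validHeaderFieldName("Content-Type")) // true
	fmt.Println(validHeaderFieldName("Bad Header"))   // false (space is not a token byte)
}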
51
vendor/golang.org/x/net/http2/Dockerfile
generated
vendored
51
vendor/golang.org/x/net/http2/Dockerfile
generated
vendored
@@ -1,51 +0,0 @@
|
||||
#
|
||||
# This Dockerfile builds a recent curl with HTTP/2 client support, using
|
||||
# a recent nghttp2 build.
|
||||
#
|
||||
# See the Makefile for how to tag it. If Docker and that image is found, the
|
||||
# Go tests use this curl binary for integration tests.
|
||||
#
|
||||
|
||||
FROM ubuntu:trusty
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get upgrade -y && \
|
||||
apt-get install -y git-core build-essential wget
|
||||
|
||||
RUN apt-get install -y --no-install-recommends \
|
||||
autotools-dev libtool pkg-config zlib1g-dev \
|
||||
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
|
||||
automake autoconf
|
||||
|
||||
# The list of packages nghttp2 recommends for h2load:
|
||||
RUN apt-get install -y --no-install-recommends make binutils \
|
||||
autoconf automake autotools-dev \
|
||||
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
|
||||
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
|
||||
cython python3.4-dev python-setuptools
|
||||
|
||||
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
|
||||
ENV NGHTTP2_VER 895da9a
|
||||
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
|
||||
|
||||
WORKDIR /root/nghttp2
|
||||
RUN git reset --hard $NGHTTP2_VER
|
||||
RUN autoreconf -i
|
||||
RUN automake
|
||||
RUN autoconf
|
||||
RUN ./configure
|
||||
RUN make
|
||||
RUN make install
|
||||
|
||||
WORKDIR /root
|
||||
RUN wget https://curl.se/download/curl-7.45.0.tar.gz
|
||||
RUN tar -zxvf curl-7.45.0.tar.gz
|
||||
WORKDIR /root/curl-7.45.0
|
||||
RUN ./configure --with-ssl --with-nghttp2=/usr/local
|
||||
RUN make
|
||||
RUN make install
|
||||
RUN ldconfig
|
||||
|
||||
CMD ["-h"]
|
||||
ENTRYPOINT ["/usr/local/bin/curl"]
|
||||
|
||||
3
vendor/golang.org/x/net/http2/Makefile
generated
vendored
3
vendor/golang.org/x/net/http2/Makefile
generated
vendored
@@ -1,3 +0,0 @@
|
||||
curlimage:
|
||||
docker build -t gohttp2/curl .
|
||||
|
||||
59
vendor/golang.org/x/net/http2/databuffer.go
generated
vendored
59
vendor/golang.org/x/net/http2/databuffer.go
generated
vendored
@@ -20,41 +20,44 @@ import (
|
||||
// TODO: Benchmark to determine if the pools are necessary. The GC may have
|
||||
// improved enough that we can instead allocate chunks like this:
|
||||
// make([]byte, max(16<<10, expectedBytesRemaining))
|
||||
var (
|
||||
dataChunkSizeClasses = []int{
|
||||
1 << 10,
|
||||
2 << 10,
|
||||
4 << 10,
|
||||
8 << 10,
|
||||
16 << 10,
|
||||
}
|
||||
dataChunkPools = [...]sync.Pool{
|
||||
{New: func() interface{} { return make([]byte, 1<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 2<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 4<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 8<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 16<<10) }},
|
||||
}
|
||||
)
|
||||
var dataChunkPools = [...]sync.Pool{
|
||||
{New: func() interface{} { return new([1 << 10]byte) }},
|
||||
{New: func() interface{} { return new([2 << 10]byte) }},
|
||||
{New: func() interface{} { return new([4 << 10]byte) }},
|
||||
{New: func() interface{} { return new([8 << 10]byte) }},
|
||||
{New: func() interface{} { return new([16 << 10]byte) }},
|
||||
}
|
||||
|
||||
func getDataBufferChunk(size int64) []byte {
|
||||
i := 0
|
||||
for ; i < len(dataChunkSizeClasses)-1; i++ {
|
||||
if size <= int64(dataChunkSizeClasses[i]) {
|
||||
break
|
||||
}
|
||||
switch {
|
||||
case size <= 1<<10:
|
||||
return dataChunkPools[0].Get().(*[1 << 10]byte)[:]
|
||||
case size <= 2<<10:
|
||||
return dataChunkPools[1].Get().(*[2 << 10]byte)[:]
|
||||
case size <= 4<<10:
|
||||
return dataChunkPools[2].Get().(*[4 << 10]byte)[:]
|
||||
case size <= 8<<10:
|
||||
return dataChunkPools[3].Get().(*[8 << 10]byte)[:]
|
||||
default:
|
||||
return dataChunkPools[4].Get().(*[16 << 10]byte)[:]
|
||||
}
|
||||
return dataChunkPools[i].Get().([]byte)
|
||||
}
|
||||
|
||||
func putDataBufferChunk(p []byte) {
|
||||
for i, n := range dataChunkSizeClasses {
|
||||
if len(p) == n {
|
||||
dataChunkPools[i].Put(p)
|
||||
return
|
||||
}
|
||||
switch len(p) {
|
||||
case 1 << 10:
|
||||
dataChunkPools[0].Put((*[1 << 10]byte)(p))
|
||||
case 2 << 10:
|
||||
dataChunkPools[1].Put((*[2 << 10]byte)(p))
|
||||
case 4 << 10:
|
||||
dataChunkPools[2].Put((*[4 << 10]byte)(p))
|
||||
case 8 << 10:
|
||||
dataChunkPools[3].Put((*[8 << 10]byte)(p))
|
||||
case 16 << 10:
|
||||
dataChunkPools[4].Put((*[16 << 10]byte)(p))
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
|
||||
}
|
||||
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
|
||||
}
|
||||
|
||||
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
|
||||
|
||||
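
The databuffer change above moves from pooling []byte values to pooling *[N]byte pointers, using the slice-to-array-pointer conversion available since Go 1.17; storing a pointer avoids the allocation that boxing a slice header in an interface{} costs on every Put. A minimal sketch of the pattern with a single size class (names illustrative):

package main

import (
	"fmt"
	"sync"
)

var chunkPool = sync.Pool{
	New: func() interface{} { return new([4 << 10]byte) },
}

// getChunk hands out a 4 KiB chunk as a slice over the pooled array.
func getChunk() []byte { return chunkPool.Get().(*[4 << 10]byte)[:] }

// putChunk converts the slice back to an array pointer before returning it,
// so Put receives a pointer and no extra allocation occurs.
func putChunk(p []byte) { chunkPool.Put((*[4 << 10]byte)(p)) }

func main() {
	buf := getChunk()
	copy(buf, "hello")
	fmt.Println(len(buf)) // 4096
	putChunk(buf)
}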
51
vendor/golang.org/x/net/http2/frame.go
generated
vendored
51
vendor/golang.org/x/net/http2/frame.go
generated
vendored
@@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool {
|
||||
// returned error is ErrFrameTooLarge. Other errors may be of type
|
||||
// ConnectionError, StreamError, or anything else from the underlying
|
||||
// reader.
|
||||
//
|
||||
// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
|
||||
// indicates the stream responsible for the error.
|
||||
func (fr *Framer) ReadFrame() (Frame, error) {
|
||||
fr.errDetail = nil
|
||||
if fr.lastFrame != nil {
|
||||
@@ -1510,19 +1513,18 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
|
||||
}
|
||||
|
||||
func (fr *Framer) maxHeaderStringLen() int {
|
||||
v := fr.maxHeaderListSize()
|
||||
if uint32(int(v)) == v {
|
||||
return int(v)
|
||||
v := int(fr.maxHeaderListSize())
|
||||
if v < 0 {
|
||||
// If maxHeaderListSize overflows an int, use no limit (0).
|
||||
return 0
|
||||
}
|
||||
// They had a crazy big number for MaxHeaderBytes anyway,
|
||||
// so give them unlimited header lengths:
|
||||
return 0
|
||||
return v
|
||||
}
|
||||
|
||||
// readMetaFrame returns 0 or more CONTINUATION frames from fr and
|
||||
// merge them into the provided hf and returns a MetaHeadersFrame
|
||||
// with the decoded hpack values.
|
||||
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
||||
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) {
|
||||
if fr.AllowIllegalReads {
|
||||
return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
|
||||
}
|
||||
@@ -1565,6 +1567,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
||||
if size > remainSize {
|
||||
hdec.SetEmitEnabled(false)
|
||||
mh.Truncated = true
|
||||
remainSize = 0
|
||||
return
|
||||
}
|
||||
remainSize -= size
|
||||
@@ -1577,8 +1580,38 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
||||
var hc headersOrContinuation = hf
|
||||
for {
|
||||
frag := hc.HeaderBlockFragment()
|
||||
|
||||
// Avoid parsing large amounts of headers that we will then discard.
|
||||
// If the sender exceeds the max header list size by too much,
|
||||
// skip parsing the fragment and close the connection.
|
||||
//
|
||||
// "Too much" is either any CONTINUATION frame after we've already
|
||||
// exceeded the max header list size (in which case remainSize is 0),
|
||||
// or a frame whose encoded size is more than twice the remaining
|
||||
// header list bytes we're willing to accept.
|
||||
if int64(len(frag)) > int64(2*remainSize) {
|
||||
if VerboseLogs {
|
||||
log.Printf("http2: header list too large")
|
||||
}
|
||||
// It would be nice to send a RST_STREAM before sending the GOAWAY,
|
||||
// but the structure of the server's frame writer makes this difficult.
|
||||
return mh, ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
|
||||
// Also close the connection after any CONTINUATION frame following an
|
||||
// invalid header, since we stop tracking the size of the headers after
|
||||
// an invalid one.
|
||||
if invalid != nil {
|
||||
if VerboseLogs {
|
||||
log.Printf("http2: invalid header: %v", invalid)
|
||||
}
|
||||
// It would be nice to send a RST_STREAM before sending the GOAWAY,
|
||||
// but the structure of the server's frame writer makes this difficult.
|
||||
return mh, ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
|
||||
if _, err := hdec.Write(frag); err != nil {
|
||||
return nil, ConnectionError(ErrCodeCompression)
|
||||
return mh, ConnectionError(ErrCodeCompression)
|
||||
}
|
||||
|
||||
if hc.HeadersEnded() {
|
||||
@@ -1595,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
||||
mh.HeadersFrame.invalidate()
|
||||
|
||||
if err := hdec.Close(); err != nil {
|
||||
return nil, ConnectionError(ErrCodeCompression)
|
||||
return mh, ConnectionError(ErrCodeCompression)
|
||||
}
|
||||
if invalid != nil {
|
||||
fr.errDetail = invalid
|
||||
|
||||
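
Two of the frame.go hunks above are defensive: maxHeaderStringLen now guards against the uint32 limit overflowing int on 32-bit platforms, and readMetaFrame stops decoding oversized or already-invalid CONTINUATION sequences. The overflow guard in isolation (illustrative sketch, not the vendored function):

package main

import "fmt"

// headerStringLimit converts a uint32 limit to int; on a 32-bit platform a
// value above MaxInt32 becomes negative, in which case 0 ("no limit") is used.
func headerStringLimit(max uint32) int {
	v := int(max)
	if v < 0 {
		return 0
	}
	return v
}

func main() {
	fmt.Println(headerStringLimit(16 << 10)) // 16384
}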
30
vendor/golang.org/x/net/http2/go111.go
generated
vendored
30
vendor/golang.org/x/net/http2/go111.go
generated
vendored
@@ -1,30 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.11
|
||||
// +build go1.11
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"net/http/httptrace"
|
||||
"net/textproto"
|
||||
)
|
||||
|
||||
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
|
||||
return trace != nil && trace.WroteHeaderField != nil
|
||||
}
|
||||
|
||||
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
|
||||
if trace != nil && trace.WroteHeaderField != nil {
|
||||
trace.WroteHeaderField(k, []string{v})
|
||||
}
|
||||
}
|
||||
|
||||
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
|
||||
if trace != nil {
|
||||
return trace.Got1xxResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
27
vendor/golang.org/x/net/http2/go115.go
generated
vendored
27
vendor/golang.org/x/net/http2/go115.go
generated
vendored
@@ -1,27 +0,0 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.15
|
||||
// +build go1.15
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
|
||||
// connection.
|
||||
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
|
||||
dialer := &tls.Dialer{
|
||||
Config: cfg,
|
||||
}
|
||||
cn, err := dialer.DialContext(ctx, network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
|
||||
return tlsCn, nil
|
||||
}
|
||||
17
vendor/golang.org/x/net/http2/go118.go
generated
vendored
17
vendor/golang.org/x/net/http2/go118.go
generated
vendored
@@ -1,17 +0,0 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
)
|
||||
|
||||
func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
|
||||
return tc.NetConn()
|
||||
}
|
||||
21
vendor/golang.org/x/net/http2/not_go111.go
generated
vendored
21
vendor/golang.org/x/net/http2/not_go111.go
generated
vendored
@@ -1,21 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.11
|
||||
// +build !go1.11
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"net/http/httptrace"
|
||||
"net/textproto"
|
||||
)
|
||||
|
||||
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
|
||||
|
||||
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
|
||||
|
||||
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
|
||||
return nil
|
||||
}
|
||||
31
vendor/golang.org/x/net/http2/not_go115.go
generated
vendored
31
vendor/golang.org/x/net/http2/not_go115.go
generated
vendored
@@ -1,31 +0,0 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.15
|
||||
// +build !go1.15
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// dialTLSWithContext opens a TLS connection.
|
||||
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
|
||||
cn, err := tls.Dial(network, addr, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cn.Handshake(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cfg.InsecureSkipVerify {
|
||||
return cn, nil
|
||||
}
|
||||
if err := cn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cn, nil
|
||||
}
|
||||
17
vendor/golang.org/x/net/http2/not_go118.go
generated
vendored
17
vendor/golang.org/x/net/http2/not_go118.go
generated
vendored
@@ -1,17 +0,0 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.18
|
||||
// +build !go1.18
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
)
|
||||
|
||||
func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
|
||||
return nil
|
||||
}
|
||||
11
vendor/golang.org/x/net/http2/pipe.go
generated
vendored
11
vendor/golang.org/x/net/http2/pipe.go
generated
vendored
@@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) {
}
}

var errClosedPipeWrite = errors.New("write on closed buffer")
var (
errClosedPipeWrite = errors.New("write on closed buffer")
errUninitializedPipeWrite = errors.New("write on uninitialized buffer")
)

// Write copies bytes from p into the buffer and wakes a reader.
// It is an error to write more data than the buffer can hold.
@@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) {
if p.err != nil || p.breakErr != nil {
return 0, errClosedPipeWrite
}
// pipe.setBuffer is never invoked, leaving the buffer uninitialized.
// We shouldn't try to write to an uninitialized pipe,
// but returning an error is better than panicking.
if p.b == nil {
return 0, errUninitializedPipeWrite
}
return p.b.Write(d)
}

134
vendor/golang.org/x/net/http2/server.go
generated
vendored
134
vendor/golang.org/x/net/http2/server.go
generated
vendored
@@ -124,6 +124,7 @@ type Server struct {
|
||||
// IdleTimeout specifies how long until idle clients should be
|
||||
// closed with a GOAWAY frame. PING frames are not considered
|
||||
// activity for the purposes of IdleTimeout.
|
||||
// If zero or negative, there is no timeout.
|
||||
IdleTimeout time.Duration
|
||||
|
||||
// MaxUploadBufferPerConnection is the size of the initial flow
|
||||
@@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
|
||||
// passes the connection off to us with the deadline already set.
|
||||
// Write deadlines are set per stream in serverConn.newStream.
|
||||
// Disarm the net.Conn write deadline here.
|
||||
if sc.hs.WriteTimeout != 0 {
|
||||
if sc.hs.WriteTimeout > 0 {
|
||||
sc.conn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
@@ -581,9 +582,11 @@ type serverConn struct {
|
||||
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
|
||||
curClientStreams uint32 // number of open streams initiated by the client
|
||||
curPushedStreams uint32 // number of open streams initiated by server push
|
||||
curHandlers uint32 // number of running handler goroutines
|
||||
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
|
||||
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
|
||||
streams map[uint32]*stream
|
||||
unstartedHandlers []unstartedHandler
|
||||
initialStreamSendWindowSize int32
|
||||
maxFrameSize int32
|
||||
peerMaxHeaderListSize uint32 // zero means unknown (default)
|
||||
@@ -729,11 +732,7 @@ func isClosedConnError(err error) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// TODO: remove this string search and be more like the Windows
|
||||
// case below. That might involve modifying the standard library
|
||||
// to return better error types.
|
||||
str := err.Error()
|
||||
if strings.Contains(str, "use of closed network connection") {
|
||||
if errors.Is(err, net.ErrClosed) {
|
||||
return true
|
||||
}
|
||||
|
||||
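
The isClosedConnError hunk above replaces a fragile error-string search with errors.Is and net.ErrClosed (available since Go 1.16); for illustration:

package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	ln.Close()

	// Accept on a closed listener returns an error that wraps net.ErrClosed.
	_, err = ln.Accept()
	fmt.Println(errors.Is(err, net.ErrClosed)) // true
}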
@@ -922,7 +921,7 @@ func (sc *serverConn) serve() {
|
||||
sc.setConnState(http.StateActive)
|
||||
sc.setConnState(http.StateIdle)
|
||||
|
||||
if sc.srv.IdleTimeout != 0 {
|
||||
if sc.srv.IdleTimeout > 0 {
|
||||
sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
|
||||
defer sc.idleTimer.Stop()
|
||||
}
|
||||
@@ -981,6 +980,8 @@ func (sc *serverConn) serve() {
|
||||
return
|
||||
case gracefulShutdownMsg:
|
||||
sc.startGracefulShutdownInternal()
|
||||
case handlerDoneMsg:
|
||||
sc.handlerDone()
|
||||
default:
|
||||
panic("unknown timer")
|
||||
}
|
||||
@@ -1012,14 +1013,6 @@ func (sc *serverConn) serve() {
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
|
||||
select {
|
||||
case <-sc.doneServing:
|
||||
case <-sharedCh:
|
||||
close(privateCh)
|
||||
}
|
||||
}
|
||||
|
||||
type serverMessage int
|
||||
|
||||
// Message values sent to serveMsgCh.
|
||||
@@ -1028,6 +1021,7 @@ var (
|
||||
idleTimerMsg = new(serverMessage)
|
||||
shutdownTimerMsg = new(serverMessage)
|
||||
gracefulShutdownMsg = new(serverMessage)
|
||||
handlerDoneMsg = new(serverMessage)
|
||||
)
|
||||
|
||||
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
|
||||
@@ -1484,6 +1478,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
|
||||
sc.goAway(ErrCodeFlowControl)
|
||||
return true
|
||||
case ConnectionError:
|
||||
if res.f != nil {
|
||||
if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
|
||||
sc.maxClientStreamID = id
|
||||
}
|
||||
}
|
||||
sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
|
||||
sc.goAway(ErrCode(ev))
|
||||
return true // goAway will handle shutdown
|
||||
@@ -1640,7 +1639,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
|
||||
delete(sc.streams, st.id)
|
||||
if len(sc.streams) == 0 {
|
||||
sc.setConnState(http.StateIdle)
|
||||
if sc.srv.IdleTimeout != 0 {
|
||||
if sc.srv.IdleTimeout > 0 {
|
||||
sc.idleTimer.Reset(sc.srv.IdleTimeout)
|
||||
}
|
||||
if h1ServerKeepAlivesDisabled(sc.hs) {
|
||||
@@ -1900,9 +1899,11 @@ func (st *stream) copyTrailersToHandlerRequest() {
|
||||
// onReadTimeout is run on its own goroutine (from time.AfterFunc)
|
||||
// when the stream's ReadTimeout has fired.
|
||||
func (st *stream) onReadTimeout() {
|
||||
// Wrap the ErrDeadlineExceeded to avoid callers depending on us
|
||||
// returning the bare error.
|
||||
st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
|
||||
if st.body != nil {
|
||||
// Wrap the ErrDeadlineExceeded to avoid callers depending on us
|
||||
// returning the bare error.
|
||||
st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
|
||||
}
|
||||
}
|
||||
|
||||
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
|
||||
@@ -2018,15 +2019,12 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
||||
// similar to how the http1 server works. Here it's
|
||||
// technically more like the http1 Server's ReadHeaderTimeout
|
||||
// (in Go 1.8), though. That's a more sane option anyway.
|
||||
if sc.hs.ReadTimeout != 0 {
|
||||
if sc.hs.ReadTimeout > 0 {
|
||||
sc.conn.SetReadDeadline(time.Time{})
|
||||
if st.body != nil {
|
||||
st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
|
||||
}
|
||||
st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
|
||||
}
|
||||
|
||||
go sc.runHandler(rw, req, handler)
|
||||
return nil
|
||||
return sc.scheduleHandler(id, rw, req, handler)
|
||||
}
|
||||
|
||||
func (sc *serverConn) upgradeRequest(req *http.Request) {
|
||||
@@ -2042,10 +2040,14 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
|
||||
|
||||
// Disable any read deadline set by the net/http package
|
||||
// prior to the upgrade.
|
||||
if sc.hs.ReadTimeout != 0 {
|
||||
if sc.hs.ReadTimeout > 0 {
|
||||
sc.conn.SetReadDeadline(time.Time{})
|
||||
}
|
||||
|
||||
// This is the first request on the connection,
|
||||
// so start the handler directly rather than going
|
||||
// through scheduleHandler.
|
||||
sc.curHandlers++
|
||||
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
|
||||
}
|
||||
|
||||
@@ -2116,7 +2118,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
|
||||
st.flow.conn = &sc.flow // link to conn-level counter
|
||||
st.flow.add(sc.initialStreamSendWindowSize)
|
||||
st.inflow.init(sc.srv.initialStreamRecvWindowSize())
|
||||
if sc.hs.WriteTimeout != 0 {
|
||||
if sc.hs.WriteTimeout > 0 {
|
||||
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
|
||||
}
|
||||
|
||||
@@ -2286,8 +2288,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response
|
||||
return &responseWriter{rws: rws}
|
||||
}
|
||||
|
||||
type unstartedHandler struct {
|
||||
streamID uint32
|
||||
rw *responseWriter
|
||||
req *http.Request
|
||||
handler func(http.ResponseWriter, *http.Request)
|
||||
}
|
||||
|
||||
// scheduleHandler starts a handler goroutine,
|
||||
// or schedules one to start as soon as an existing handler finishes.
|
||||
func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
|
||||
sc.serveG.check()
|
||||
maxHandlers := sc.advMaxStreams
|
||||
if sc.curHandlers < maxHandlers {
|
||||
sc.curHandlers++
|
||||
go sc.runHandler(rw, req, handler)
|
||||
return nil
|
||||
}
|
||||
if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
|
||||
return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
|
||||
}
|
||||
sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
|
||||
streamID: streamID,
|
||||
rw: rw,
|
||||
req: req,
|
||||
handler: handler,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sc *serverConn) handlerDone() {
|
||||
sc.serveG.check()
|
||||
sc.curHandlers--
|
||||
i := 0
|
||||
maxHandlers := sc.advMaxStreams
|
||||
for ; i < len(sc.unstartedHandlers); i++ {
|
||||
u := sc.unstartedHandlers[i]
|
||||
if sc.streams[u.streamID] == nil {
|
||||
// This stream was reset before its goroutine had a chance to start.
|
||||
continue
|
||||
}
|
||||
if sc.curHandlers >= maxHandlers {
|
||||
break
|
||||
}
|
||||
sc.curHandlers++
|
||||
go sc.runHandler(u.rw, u.req, u.handler)
|
||||
sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
|
||||
}
|
||||
sc.unstartedHandlers = sc.unstartedHandlers[i:]
|
||||
if len(sc.unstartedHandlers) == 0 {
|
||||
sc.unstartedHandlers = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Run on its own goroutine.
|
||||
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
|
||||
defer sc.sendServeMsg(handlerDoneMsg)
|
||||
didPanic := true
|
||||
defer func() {
|
||||
rw.rws.stream.cancelCtx()
|
||||
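
scheduleHandler/handlerDone above cap the number of concurrently running handler goroutines at the advertised MAX_CONCURRENT_STREAMS and queue the rest until a handler finishes. A heavily simplified, mutex-based model of that scheduling idea (the real code runs on the serve goroutine via handlerDoneMsg; names here are illustrative):

package main

import (
	"fmt"
	"sync"
)

type scheduler struct {
	mu          sync.Mutex
	maxHandlers int
	running     int
	queue       []func()
}

// schedule starts f immediately if a slot is free, otherwise queues it.
func (s *scheduler) schedule(f func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.running < s.maxHandlers {
		s.running++
		go s.run(f)
		return
	}
	s.queue = append(s.queue, f)
}

// run executes f, then hands the freed slot to the next queued function.
func (s *scheduler) run(f func()) {
	f()
	s.mu.Lock()
	defer s.mu.Unlock()
	s.running--
	if len(s.queue) > 0 {
		next := s.queue[0]
		s.queue = s.queue[1:]
		s.running++
		go s.run(next)
	}
}

func main() {
	var wg sync.WaitGroup
	s := &scheduler{maxHandlers: 2}
	for i := 0; i < 5; i++ {
		i := i
		wg.Add(1)
		s.schedule(func() { defer wg.Done(); fmt.Println("handler", i) })
	}
	wg.Wait()
}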
@@ -2495,7 +2551,6 @@ type responseWriterState struct {
|
||||
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
|
||||
sentHeader bool // have we sent the header frame?
|
||||
handlerDone bool // handler has finished
|
||||
dirty bool // a Write failed; don't reuse this responseWriterState
|
||||
|
||||
sentContentLen int64 // non-zero if handler set a Content-Length header
|
||||
wroteBytes int64
|
||||
@@ -2615,7 +2670,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
|
||||
date: date,
|
||||
})
|
||||
if err != nil {
|
||||
rws.dirty = true
|
||||
return 0, err
|
||||
}
|
||||
if endStream {
|
||||
@@ -2636,7 +2690,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
|
||||
if len(p) > 0 || endStream {
|
||||
// only send a 0 byte DATA frame if we're ending the stream.
|
||||
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
|
||||
rws.dirty = true
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
@@ -2648,9 +2701,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
|
||||
trailers: rws.trailers,
|
||||
endStream: true,
|
||||
})
|
||||
if err != nil {
|
||||
rws.dirty = true
|
||||
}
|
||||
return len(p), err
|
||||
}
|
||||
return len(p), nil
|
||||
@@ -2866,14 +2916,12 @@ func (rws *responseWriterState) writeHeader(code int) {
|
||||
h.Del("Transfer-Encoding")
|
||||
}
|
||||
|
||||
if rws.conn.writeHeaders(rws.stream, &writeResHeaders{
|
||||
rws.conn.writeHeaders(rws.stream, &writeResHeaders{
|
||||
streamID: rws.stream.id,
|
||||
httpResCode: code,
|
||||
h: h,
|
||||
endStream: rws.handlerDone && !rws.hasTrailers(),
|
||||
}) != nil {
|
||||
rws.dirty = true
|
||||
}
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
@@ -2938,19 +2986,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
|
||||
|
||||
func (w *responseWriter) handlerDone() {
|
||||
rws := w.rws
|
||||
dirty := rws.dirty
|
||||
rws.handlerDone = true
|
||||
w.Flush()
|
||||
w.rws = nil
|
||||
if !dirty {
|
||||
// Only recycle the pool if all prior Write calls to
|
||||
// the serverConn goroutine completed successfully. If
|
||||
// they returned earlier due to resets from the peer
|
||||
// there might still be write goroutines outstanding
|
||||
// from the serverConn referencing the rws memory. See
|
||||
// issue 20704.
|
||||
responseWriterStatePool.Put(rws)
|
||||
}
|
||||
responseWriterStatePool.Put(rws)
|
||||
}
|
||||
|
||||
// Push errors.
|
||||
@@ -3133,6 +3172,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
|
||||
panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
|
||||
}
|
||||
|
||||
sc.curHandlers++
|
||||
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
|
||||
return promisedID, nil
|
||||
}
|
||||
|
||||
331
vendor/golang.org/x/net/http2/testsync.go
generated
vendored
Normal file
331
vendor/golang.org/x/net/http2/testsync.go
generated
vendored
Normal file
@@ -0,0 +1,331 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
package http2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// testSyncHooks coordinates goroutines in tests.
|
||||
//
|
||||
// For example, a call to ClientConn.RoundTrip involves several goroutines, including:
|
||||
// - the goroutine running RoundTrip;
|
||||
// - the clientStream.doRequest goroutine, which writes the request; and
|
||||
// - the clientStream.readLoop goroutine, which reads the response.
|
||||
//
|
||||
// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines
|
||||
// are blocked waiting for some condition such as reading the Request.Body or waiting for
|
||||
// flow control to become available.
|
||||
//
|
||||
// The testSyncHooks also manage timers and synthetic time in tests.
|
||||
// This permits us to, for example, start a request and cause it to time out waiting for
|
||||
// response headers without resorting to time.Sleep calls.
|
||||
type testSyncHooks struct {
|
||||
// active/inactive act as a mutex and condition variable.
|
||||
//
|
||||
// - neither chan contains a value: testSyncHooks is locked.
|
||||
// - active contains a value: unlocked, and at least one goroutine is not blocked
|
||||
// - inactive contains a value: unlocked, and all goroutines are blocked
|
||||
active chan struct{}
|
||||
inactive chan struct{}
|
||||
|
||||
// goroutine counts
|
||||
total int // total goroutines
|
||||
condwait map[*sync.Cond]int // blocked in sync.Cond.Wait
|
||||
blocked []*testBlockedGoroutine // otherwise blocked
|
||||
|
||||
// fake time
|
||||
now time.Time
|
||||
timers []*fakeTimer
|
||||
|
||||
// Transport testing: Report various events.
|
||||
newclientconn func(*ClientConn)
|
||||
newstream func(*clientStream)
|
||||
}
|
||||
|
||||
// testBlockedGoroutine is a blocked goroutine.
|
||||
type testBlockedGoroutine struct {
|
||||
f func() bool // blocked until f returns true
|
||||
ch chan struct{} // closed when unblocked
|
||||
}
|
||||
|
||||
func newTestSyncHooks() *testSyncHooks {
|
||||
h := &testSyncHooks{
|
||||
active: make(chan struct{}, 1),
|
||||
inactive: make(chan struct{}, 1),
|
||||
condwait: map[*sync.Cond]int{},
|
||||
}
|
||||
h.inactive <- struct{}{}
|
||||
h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
return h
|
||||
}
|
||||
|
||||
// lock acquires the testSyncHooks mutex.
|
||||
func (h *testSyncHooks) lock() {
|
||||
select {
|
||||
case <-h.active:
|
||||
case <-h.inactive:
|
||||
}
|
||||
}
|
||||
|
||||
// waitInactive waits for all goroutines to become inactive.
|
||||
func (h *testSyncHooks) waitInactive() {
|
||||
for {
|
||||
<-h.inactive
|
||||
if !h.unlock() {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unlock releases the testSyncHooks mutex.
|
||||
// It reports whether any goroutines are active.
|
||||
func (h *testSyncHooks) unlock() (active bool) {
|
||||
// Look for a blocked goroutine which can be unblocked.
|
||||
blocked := h.blocked[:0]
|
||||
unblocked := false
|
||||
for _, b := range h.blocked {
|
||||
if !unblocked && b.f() {
|
||||
unblocked = true
|
||||
close(b.ch)
|
||||
} else {
|
||||
blocked = append(blocked, b)
|
||||
}
|
||||
}
|
||||
h.blocked = blocked
|
||||
|
||||
// Count goroutines blocked on condition variables.
|
||||
condwait := 0
|
||||
for _, count := range h.condwait {
|
||||
condwait += count
|
||||
}
|
||||
|
||||
if h.total > condwait+len(blocked) {
|
||||
h.active <- struct{}{}
|
||||
return true
|
||||
} else {
|
||||
h.inactive <- struct{}{}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// goRun starts a new goroutine.
|
||||
func (h *testSyncHooks) goRun(f func()) {
|
||||
h.lock()
|
||||
h.total++
|
||||
h.unlock()
|
||||
go func() {
|
||||
defer func() {
|
||||
h.lock()
|
||||
h.total--
|
||||
h.unlock()
|
||||
}()
|
||||
f()
|
||||
}()
|
||||
}
|
||||
|
||||
// blockUntil indicates that a goroutine is blocked waiting for some condition to become true.
|
||||
// It waits until f returns true before proceeding.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// h.blockUntil(func() bool {
|
||||
// // Is the context done yet?
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// default:
|
||||
// return false
|
||||
// }
|
||||
// return true
|
||||
// })
|
||||
// // Wait for the context to become done.
|
||||
// <-ctx.Done()
|
||||
//
|
||||
// The function f passed to blockUntil must be non-blocking and idempotent.
|
||||
func (h *testSyncHooks) blockUntil(f func() bool) {
|
||||
if f() {
|
||||
return
|
||||
}
|
||||
ch := make(chan struct{})
|
||||
h.lock()
|
||||
h.blocked = append(h.blocked, &testBlockedGoroutine{
|
||||
f: f,
|
||||
ch: ch,
|
||||
})
|
||||
h.unlock()
|
||||
<-ch
|
||||
}
|
||||
|
||||
// broadcast is sync.Cond.Broadcast.
|
||||
func (h *testSyncHooks) condBroadcast(cond *sync.Cond) {
|
||||
h.lock()
|
||||
delete(h.condwait, cond)
|
||||
h.unlock()
|
||||
cond.Broadcast()
|
||||
}
|
||||
|
||||
// broadcast is sync.Cond.Wait.
|
||||
func (h *testSyncHooks) condWait(cond *sync.Cond) {
|
||||
h.lock()
|
||||
h.condwait[cond]++
|
||||
h.unlock()
|
||||
}
|
||||
|
||||
// newTimer creates a new fake timer.
|
||||
func (h *testSyncHooks) newTimer(d time.Duration) timer {
|
||||
h.lock()
|
||||
defer h.unlock()
|
||||
t := &fakeTimer{
|
||||
hooks: h,
|
||||
when: h.now.Add(d),
|
||||
c: make(chan time.Time),
|
||||
}
|
||||
h.timers = append(h.timers, t)
|
||||
return t
|
||||
}
|
||||
|
||||
// afterFunc creates a new fake AfterFunc timer.
|
||||
func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer {
|
||||
h.lock()
|
||||
defer h.unlock()
|
||||
t := &fakeTimer{
|
||||
hooks: h,
|
||||
when: h.now.Add(d),
|
||||
f: f,
|
||||
}
|
||||
h.timers = append(h.timers, t)
|
||||
return t
|
||||
}
|
||||
|
||||
func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t := h.afterFunc(d, cancel)
|
||||
return ctx, func() {
|
||||
t.Stop()
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
func (h *testSyncHooks) timeUntilEvent() time.Duration {
|
||||
h.lock()
|
||||
defer h.unlock()
|
||||
var next time.Time
|
||||
for _, t := range h.timers {
|
||||
if next.IsZero() || t.when.Before(next) {
|
||||
next = t.when
|
||||
}
|
||||
}
|
||||
if d := next.Sub(h.now); d > 0 {
|
||||
return d
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// advance advances time and causes synthetic timers to fire.
|
||||
func (h *testSyncHooks) advance(d time.Duration) {
|
||||
h.lock()
|
||||
defer h.unlock()
|
||||
h.now = h.now.Add(d)
|
||||
timers := h.timers[:0]
|
||||
for _, t := range h.timers {
|
||||
t := t // remove after go.mod depends on go1.22
|
||||
t.mu.Lock()
|
||||
switch {
|
||||
case t.when.After(h.now):
|
||||
timers = append(timers, t)
|
||||
case t.when.IsZero():
|
||||
// stopped timer
|
||||
default:
|
||||
t.when = time.Time{}
|
||||
if t.c != nil {
|
||||
close(t.c)
|
||||
}
|
||||
if t.f != nil {
|
||||
h.total++
|
||||
go func() {
|
||||
defer func() {
|
||||
h.lock()
|
||||
h.total--
|
||||
h.unlock()
|
||||
}()
|
||||
t.f()
|
||||
}()
|
||||
}
|
||||
}
|
||||
t.mu.Unlock()
|
||||
}
|
||||
h.timers = timers
|
||||
}
|
||||
|
||||
// A timer wraps a time.Timer, or a synthetic equivalent in tests.
|
||||
// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires.
|
||||
type timer interface {
|
||||
C() <-chan time.Time
|
||||
Stop() bool
|
||||
Reset(d time.Duration) bool
|
||||
}
|
||||
|
||||
// timeTimer implements timer using real time.
|
||||
type timeTimer struct {
|
||||
t *time.Timer
|
||||
c chan time.Time
|
||||
}
|
||||
|
||||
// newTimeTimer creates a new timer using real time.
|
||||
func newTimeTimer(d time.Duration) timer {
|
||||
ch := make(chan time.Time)
|
||||
t := time.AfterFunc(d, func() {
|
||||
close(ch)
|
||||
})
|
||||
return &timeTimer{t, ch}
|
||||
}
|
||||
|
||||
// newTimeAfterFunc creates an AfterFunc timer using real time.
|
||||
func newTimeAfterFunc(d time.Duration, f func()) timer {
|
||||
return &timeTimer{
|
||||
t: time.AfterFunc(d, f),
|
||||
}
|
||||
}
|
||||
|
||||
func (t timeTimer) C() <-chan time.Time { return t.c }
|
||||
func (t timeTimer) Stop() bool { return t.t.Stop() }
|
||||
func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) }
|
||||
|
||||
// fakeTimer implements timer using fake time.
|
||||
type fakeTimer struct {
|
||||
hooks *testSyncHooks
|
||||
|
||||
mu sync.Mutex
|
||||
when time.Time // when the timer will fire
|
||||
c chan time.Time // closed when the timer fires; mutually exclusive with f
|
||||
f func() // called when the timer fires; mutually exclusive with c
|
||||
}
|
||||
|
||||
func (t *fakeTimer) C() <-chan time.Time { return t.c }
|
||||
|
||||
func (t *fakeTimer) Stop() bool {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
stopped := t.when.IsZero()
|
||||
t.when = time.Time{}
|
||||
return stopped
|
||||
}
|
||||
|
||||
func (t *fakeTimer) Reset(d time.Duration) bool {
|
||||
if t.c != nil || t.f == nil {
|
||||
panic("fakeTimer only supports Reset on AfterFunc timers")
|
||||
}
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.hooks.lock()
|
||||
defer t.hooks.unlock()
|
||||
active := !t.when.IsZero()
|
||||
t.when = t.hooks.now.Add(d)
|
||||
if !active {
|
||||
t.hooks.timers = append(t.hooks.timers, t)
|
||||
}
|
||||
return active
|
||||
}
|
||||
358
vendor/golang.org/x/net/http2/transport.go
generated
vendored
358
vendor/golang.org/x/net/http2/transport.go
generated
vendored
@@ -147,6 +147,12 @@ type Transport struct {
|
||||
// waiting for their turn.
|
||||
StrictMaxConcurrentStreams bool
|
||||
|
||||
// IdleConnTimeout is the maximum amount of time an idle
|
||||
// (keep-alive) connection will remain idle before closing
|
||||
// itself.
|
||||
// Zero means no limit.
|
||||
IdleConnTimeout time.Duration
|
||||
|
||||
// ReadIdleTimeout is the timeout after which a health check using ping
|
||||
// frame will be carried out if no frame is received on the connection.
|
||||
// Note that a ping response will is considered a received frame, so if
@@ -178,6 +184,8 @@ type Transport struct {

	connPoolOnce sync.Once
	connPoolOrDef ClientConnPool // non-nil version of ConnPool

	syncHooks *testSyncHooks
}

func (t *Transport) maxHeaderListSize() uint32 {
@@ -291,8 +299,7 @@ func (t *Transport) initConnPool() {
// HTTP/2 server.
type ClientConn struct {
	t *Transport
	tconn net.Conn // usually *tls.Conn, except specialized impls
	tconnClosed bool
	tconn net.Conn // usually *tls.Conn, except specialized impls
	tlsState *tls.ConnectionState // nil only for specialized impls
	reused uint32 // whether conn is being reused; atomic
	singleUse bool // whether being used for a single http.Request
@@ -303,7 +310,7 @@ type ClientConn struct {
	readerErr error // set before readerDone is closed

	idleTimeout time.Duration // or 0 for never
	idleTimer *time.Timer
	idleTimer timer

	mu sync.Mutex // guards following
	cond *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -345,6 +352,60 @@ type ClientConn struct {
	werr error // first write error that has occurred
	hbuf bytes.Buffer // HPACK encoder writes into this
	henc *hpack.Encoder

	syncHooks *testSyncHooks // can be nil
}

// Hook points used for testing.
// Outside of tests, cc.syncHooks is nil and these all have minimal implementations.
// Inside tests, see the testSyncHooks function docs.

// goRun starts a new goroutine.
func (cc *ClientConn) goRun(f func()) {
	if cc.syncHooks != nil {
		cc.syncHooks.goRun(f)
		return
	}
	go f()
}

// condBroadcast is cc.cond.Broadcast.
func (cc *ClientConn) condBroadcast() {
	if cc.syncHooks != nil {
		cc.syncHooks.condBroadcast(cc.cond)
	}
	cc.cond.Broadcast()
}

// condWait is cc.cond.Wait.
func (cc *ClientConn) condWait() {
	if cc.syncHooks != nil {
		cc.syncHooks.condWait(cc.cond)
	}
	cc.cond.Wait()
}

// newTimer creates a new time.Timer, or a synthetic timer in tests.
func (cc *ClientConn) newTimer(d time.Duration) timer {
	if cc.syncHooks != nil {
		return cc.syncHooks.newTimer(d)
	}
	return newTimeTimer(d)
}

// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer {
	if cc.syncHooks != nil {
		return cc.syncHooks.afterFunc(d, f)
	}
	return newTimeAfterFunc(d, f)
}

func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
	if cc.syncHooks != nil {
		return cc.syncHooks.contextWithTimeout(ctx, d)
	}
	return context.WithTimeout(ctx, d)
}

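Each hook point above follows the same shape: consult cc.syncHooks when it is non-nil (tests), otherwise fall back to the real goroutine, sync.Cond, timer, or context primitive. A standalone sketch of that pattern, with illustrative names that are not part of this patch:

// Minimal sketch of the "optional test hook" pattern used by goRun,
// condWait, newTimer, etc. The clock interface and worker type are
// invented for illustration only.
package main

import (
	"fmt"
	"time"
)

type clock interface {
	Sleep(d time.Duration)
}

type worker struct {
	testClock clock // nil outside of tests
}

func (w *worker) sleep(d time.Duration) {
	if w.testClock != nil {
		w.testClock.Sleep(d) // synthetic time under test control
		return
	}
	time.Sleep(d) // real time in production
}

func main() {
	w := &worker{}
	w.sleep(10 * time.Millisecond)
	fmt.Println("slept")
}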
// clientStream is the state for a single HTTP/2 stream. One of these
@@ -426,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) {
	// TODO(dneil): Clean up tests where cs.cc.cond is nil.
	if cs.cc.cond != nil {
		// Wake up writeRequestBody if it is waiting on flow control.
		cs.cc.cond.Broadcast()
		cs.cc.condBroadcast()
	}
}

@@ -436,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() {
	defer cc.mu.Unlock()
	if cs.reqBody != nil && cs.reqBodyClosed == nil {
		cs.closeReqBodyLocked()
		cc.cond.Broadcast()
		cc.condBroadcast()
	}
}

@@ -446,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() {
	}
	cs.reqBodyClosed = make(chan struct{})
	reqBodyClosed := cs.reqBodyClosed
	go func() {
	cs.cc.goRun(func() {
		cs.reqBody.Close()
		close(reqBodyClosed)
	}()
	})
}

type stickyErrWriter struct {
@@ -538,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) {
	return net.JoinHostPort(host, port)
}

var retryBackoffHook func(time.Duration) *time.Timer

func backoffNewTimer(d time.Duration) *time.Timer {
	if retryBackoffHook != nil {
		return retryBackoffHook(d)
	}
	return time.NewTimer(d)
}

// RoundTripOpt is like RoundTrip, but takes options.
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
	if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
@@ -574,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
			backoff := float64(uint(1) << (uint(retry) - 1))
			backoff += backoff * (0.1 * mathrand.Float64())
			d := time.Second * time.Duration(backoff)
			timer := backoffNewTimer(d)
			var tm timer
			if t.syncHooks != nil {
				tm = t.syncHooks.newTimer(d)
				t.syncHooks.blockUntil(func() bool {
					select {
					case <-tm.C():
					case <-req.Context().Done():
					default:
						return false
					}
					return true
				})
			} else {
				tm = newTimeTimer(d)
			}
			select {
			case <-timer.C:
			case <-tm.C():
				t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
				continue
			case <-req.Context().Done():
				timer.Stop()
				tm.Stop()
				err = req.Context().Err()
			}
		}
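The delay computed in the retry loop above is a doubling backoff with up to 10% random jitter: 2^(retry-1) seconds for retry 1, 2, 3, and so on. A small helper that mirrors the same arithmetic, for illustration only (the patch keeps the computation inline):

// Illustrative restatement of the inline backoff above. jitter is a value
// in [0, 1), as produced by mathrand.Float64(). Note that the final
// conversion truncates the fraction, so the result stays a whole number of
// seconds (1s, 2s, 4s, 8s, ...).
func retryBackoff(retry uint, jitter float64) time.Duration {
	backoff := float64(uint(1) << (retry - 1))
	backoff += backoff * (0.1 * jitter)
	return time.Second * time.Duration(backoff)
}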
@@ -659,6 +725,9 @@ func canRetryError(err error) bool {
}

func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
	if t.syncHooks != nil {
		return t.newClientConn(nil, singleUse, t.syncHooks)
	}
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, err
@@ -667,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
	if err != nil {
		return nil, err
	}
	return t.newClientConn(tconn, singleUse)
	return t.newClientConn(tconn, singleUse, nil)
}

func (t *Transport) newTLSConfig(host string) *tls.Config {
@@ -733,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 {
}

func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
	return t.newClientConn(c, t.disableKeepAlives())
	return t.newClientConn(c, t.disableKeepAlives(), nil)
}

func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) {
	cc := &ClientConn{
		t: t,
		tconn: c,
@@ -751,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
		wantSettingsAck: true,
		pings: make(map[[8]byte]chan struct{}),
		reqHeaderMu: make(chan struct{}, 1),
		syncHooks: hooks,
	}
	if hooks != nil {
		hooks.newclientconn(cc)
		c = cc.tconn
	}
	if d := t.idleConnTimeout(); d != 0 {
		cc.idleTimeout = d
		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
		cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout)
	}
	if VerboseLogs {
		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -819,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
		return nil, cc.werr
	}

	go cc.readLoop()
	cc.goRun(cc.readLoop)
	return cc, nil
}

@@ -827,7 +901,7 @@ func (cc *ClientConn) healthCheck() {
	pingTimeout := cc.t.pingTimeout()
	// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
	// trigger the healthCheck again if there is no frame received.
	ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
	ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout)
	defer cancel()
	cc.vlogf("http2: Transport sending health check")
	err := cc.Ping(ctx)
@@ -862,7 +936,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
	}
	last := f.LastStreamID
	for streamID, cs := range cc.streams {
		if streamID > last {
		if streamID <= last {
			// The server's GOAWAY indicates that it received this stream.
			// It will either finish processing it, or close the connection
			// without doing so. Either way, leave the stream alone for now.
			continue
		}
		if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo {
			// Don't retry the first stream on a connection if we get a non-NO error.
			// If the server is sending an error on a new connection,
			// retrying the request on a new one probably isn't going to work.
			cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode))
		} else {
			// Aborting the stream with errClientConnGotGoAway indicates that
			// the request should be retried on a new connection.
			cs.abortStreamLocked(errClientConnGotGoAway)
		}
	}
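The branch above decides how an in-flight stream is aborted when a GOAWAY arrives, which in turn controls whether the caller retries it on a new connection. An illustrative restatement, written as it would read inside package http2 (the helper name is mine, not part of the patch), for streams the server did not acknowledge (ID greater than LastStreamID):

// Hypothetical helper mirroring the logic above: stream 1 on a connection
// that reports a non-NO error gets a non-retryable error, everything else
// gets errClientConnGotGoAway, which asks the caller to retry elsewhere.
func goAwayAbortError(streamID uint32, errCode ErrCode) error {
	if streamID == 1 && errCode != ErrCodeNo {
		return fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", errCode)
	}
	return errClientConnGotGoAway
}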
@@ -1019,7 +1106,7 @@ func (cc *ClientConn) forceCloseConn() {
	if !ok {
		return
	}
	if nc := tlsUnderlyingConn(tc); nc != nil {
	if nc := tc.NetConn(); nc != nil {
		nc.Close()
	}
}
@@ -1057,7 +1144,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
	// Wait for all in-flight streams to complete or connection to close
	done := make(chan struct{})
	cancelled := false // guarded by cc.mu
	go func() {
	cc.goRun(func() {
		cc.mu.Lock()
		defer cc.mu.Unlock()
		for {
@@ -1069,9 +1156,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
			if cancelled {
				break
			}
			cc.cond.Wait()
			cc.condWait()
		}
	}()
	})
	shutdownEnterWaitStateHook()
	select {
	case <-done:
@@ -1081,7 +1168,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
		cc.mu.Lock()
		// Free the goroutine above
		cancelled = true
		cc.cond.Broadcast()
		cc.condBroadcast()
		cc.mu.Unlock()
		return ctx.Err()
	}
@@ -1119,7 +1206,7 @@ func (cc *ClientConn) closeForError(err error) {
	for _, cs := range cc.streams {
		cs.abortStreamLocked(err)
	}
	cc.cond.Broadcast()
	cc.condBroadcast()
	cc.mu.Unlock()
	cc.closeConn()
}
@@ -1216,6 +1303,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() {
}

func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
	return cc.roundTrip(req, nil)
}

func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) {
	ctx := req.Context()
	cs := &clientStream{
		cc: cc,
@@ -1230,9 +1321,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
		respHeaderRecv: make(chan struct{}),
		donec: make(chan struct{}),
	}
	go cs.doRequest(req)
	cc.goRun(func() {
		cs.doRequest(req)
	})

	waitDone := func() error {
		if cc.syncHooks != nil {
			cc.syncHooks.blockUntil(func() bool {
				select {
				case <-cs.donec:
				case <-ctx.Done():
				case <-cs.reqCancel:
				default:
					return false
				}
				return true
			})
		}
		select {
		case <-cs.donec:
			return nil
@@ -1293,7 +1398,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
		return err
	}

	if streamf != nil {
		streamf(cs)
	}

	for {
		if cc.syncHooks != nil {
			cc.syncHooks.blockUntil(func() bool {
				select {
				case <-cs.respHeaderRecv:
				case <-cs.abort:
				case <-ctx.Done():
				case <-cs.reqCancel:
				default:
					return false
				}
				return true
			})
		}
		select {
		case <-cs.respHeaderRecv:
			return handleResponseHeaders()
@@ -1349,6 +1471,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
	if cc.reqHeaderMu == nil {
		panic("RoundTrip on uninitialized ClientConn") // for tests
	}
	var newStreamHook func(*clientStream)
	if cc.syncHooks != nil {
		newStreamHook = cc.syncHooks.newstream
		cc.syncHooks.blockUntil(func() bool {
			select {
			case cc.reqHeaderMu <- struct{}{}:
				<-cc.reqHeaderMu
			case <-cs.reqCancel:
			case <-ctx.Done():
			default:
				return false
			}
			return true
		})
	}
	select {
	case cc.reqHeaderMu <- struct{}{}:
	case <-cs.reqCancel:
@@ -1373,6 +1510,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
	}
	cc.mu.Unlock()

	if newStreamHook != nil {
		newStreamHook(cs)
	}

	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
	if !cc.t.disableCompression() &&
		req.Header.Get("Accept-Encoding") == "" &&
@@ -1453,15 +1594,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
	var respHeaderTimer <-chan time.Time
	var respHeaderRecv chan struct{}
	if d := cc.responseHeaderTimeout(); d != 0 {
		timer := time.NewTimer(d)
		timer := cc.newTimer(d)
		defer timer.Stop()
		respHeaderTimer = timer.C
		respHeaderTimer = timer.C()
		respHeaderRecv = cs.respHeaderRecv
	}
	// Wait until the peer half-closes its end of the stream,
	// or until the request is aborted (via context, error, or otherwise),
	// whichever comes first.
	for {
		if cc.syncHooks != nil {
			cc.syncHooks.blockUntil(func() bool {
				select {
				case <-cs.peerClosed:
				case <-respHeaderTimer:
				case <-respHeaderRecv:
				case <-cs.abort:
				case <-ctx.Done():
				case <-cs.reqCancel:
				default:
					return false
				}
				return true
			})
		}
		select {
		case <-cs.peerClosed:
			return nil
@@ -1610,7 +1766,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
			return nil
		}
		cc.pendingRequests++
		cc.cond.Wait()
		cc.condWait()
		cc.pendingRequests--
		select {
		case <-cs.abort:
@@ -1872,10 +2028,26 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
			cs.flow.take(take)
			return take, nil
		}
		cc.cond.Wait()
		cc.condWait()
	}
}

func validateHeaders(hdrs http.Header) string {
	for k, vv := range hdrs {
		if !httpguts.ValidHeaderFieldName(k) {
			return fmt.Sprintf("name %q", k)
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				// Don't include the value in the error,
				// because it may be sensitive.
				return fmt.Sprintf("value for header %q", k)
			}
		}
	}
	return ""
}

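validateHeaders returns the empty string when every name and value is valid, and a short description of the first offender otherwise; encodeHeaders (below) wraps that string into an error for both the headers and the trailers. A hypothetical call, written as it would appear inside package http2 (where net/http, fmt, and httpguts are already imported):

// Hypothetical illustration; validateHeaders is unexported, so this only
// compiles inside the http2 package.
func exampleValidateHeaders() error {
	h := http.Header{"Good-Header": {"ok"}, "bad header": {"nope"}}
	if msg := validateHeaders(h); msg != "" {
		// msg is `name "bad header"`: a space is not a valid token character.
		return fmt.Errorf("invalid HTTP header %s", msg)
	}
	return nil
}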
var errNilRequestURL = errors.New("http2: Request.URI is nil")

// requires cc.wmu be held.
@@ -1913,19 +2085,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
		}
	}

	// Check for any invalid headers and return an error before we
	// Check for any invalid headers+trailers and return an error before we
	// potentially pollute our hpack state. (We want to be able to
	// continue to reuse the hpack encoder for future requests)
	for k, vv := range req.Header {
		if !httpguts.ValidHeaderFieldName(k) {
			return nil, fmt.Errorf("invalid HTTP header name %q", k)
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				// Don't include the value in the error, because it may be sensitive.
				return nil, fmt.Errorf("invalid HTTP header value for header %q", k)
			}
		}
	if err := validateHeaders(req.Header); err != "" {
		return nil, fmt.Errorf("invalid HTTP header %s", err)
	}
	if err := validateHeaders(req.Trailer); err != "" {
		return nil, fmt.Errorf("invalid HTTP trailer %s", err)
	}

	enumerateHeaders := func(f func(name, value string)) {
@@ -2144,7 +2311,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
	}
	// Wake up writeRequestBody via clientStream.awaitFlowControl and
	// wake up RoundTrip if there is a pending request.
	cc.cond.Broadcast()
	cc.condBroadcast()

	closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
	if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
@@ -2232,7 +2399,7 @@ func (rl *clientConnReadLoop) cleanup() {
			cs.abortStreamLocked(err)
		}
	}
	cc.cond.Broadcast()
	cc.condBroadcast()
	cc.mu.Unlock()
}

@@ -2267,10 +2434,9 @@ func (rl *clientConnReadLoop) run() error {
	cc := rl.cc
	gotSettings := false
	readIdleTimeout := cc.t.ReadIdleTimeout
	var t *time.Timer
	var t timer
	if readIdleTimeout != 0 {
		t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
		defer t.Stop()
		t = cc.afterFunc(readIdleTimeout, cc.healthCheck)
	}
	for {
		f, err := cc.fr.ReadFrame()
@@ -2685,7 +2851,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
		})
		return nil
	}
	if !cs.firstByte {
	if !cs.pastHeaders {
		cc.logf("protocol error: received DATA before a HEADERS frame")
		rl.endStreamError(cs, StreamError{
			StreamID: f.StreamID,
@@ -2868,7 +3034,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
		for _, cs := range cc.streams {
			cs.flow.add(delta)
		}
		cc.cond.Broadcast()
		cc.condBroadcast()

		cc.initialWindowSize = s.Val
	case SettingHeaderTableSize:
@@ -2912,9 +3078,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
		fl = &cs.flow
	}
	if !fl.add(int32(f.Increment)) {
		// For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR
		if cs != nil {
			rl.endStreamError(cs, StreamError{
				StreamID: f.StreamID,
				Code: ErrCodeFlowControl,
			})
			return nil
		}

		return ConnectionError(ErrCodeFlowControl)
	}
	cc.cond.Broadcast()
	cc.condBroadcast()
	return nil
}

@@ -2956,24 +3131,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
		}
		cc.mu.Unlock()
	}
	errc := make(chan error, 1)
	go func() {
	var pingError error
	errc := make(chan struct{})
	cc.goRun(func() {
		cc.wmu.Lock()
		defer cc.wmu.Unlock()
		if err := cc.fr.WritePing(false, p); err != nil {
			errc <- err
		if pingError = cc.fr.WritePing(false, p); pingError != nil {
			close(errc)
			return
		}
		if err := cc.bw.Flush(); err != nil {
			errc <- err
		if pingError = cc.bw.Flush(); pingError != nil {
			close(errc)
			return
		}
	}()
	})
	if cc.syncHooks != nil {
		cc.syncHooks.blockUntil(func() bool {
			select {
			case <-c:
			case <-errc:
			case <-ctx.Done():
			case <-cc.readerDone:
			default:
				return false
			}
			return true
		})
	}
	select {
	case <-c:
		return nil
	case err := <-errc:
		return err
	case <-errc:
		return pingError
	case <-ctx.Done():
		return ctx.Err()
	case <-cc.readerDone:
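The Ping change above swaps a buffered error channel for a captured pingError plus a channel that is closed on failure; closing (rather than sending) lets both the test hook's blockUntil probe and the final select observe the failure without competing for a single value. A standalone sketch of that pattern, with illustrative names:

// Sketch of "record the error, then close the channel" signalling. The
// write to opErr happens before close(done), and the read happens after
// <-done, so the error is published race-free to any number of observers.
package main

import (
	"errors"
	"fmt"
)

func main() {
	var opErr error
	done := make(chan struct{})
	go func() {
		opErr = errors.New("write failed")
		close(done)
	}()
	<-done
	fmt.Println(opErr)
}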
@@ -3142,9 +3331,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err
}

func (t *Transport) idleConnTimeout() time.Duration {
	// to keep things backwards compatible, we use non-zero values of
	// IdleConnTimeout, followed by using the IdleConnTimeout on the underlying
	// http1 transport, followed by 0
	if t.IdleConnTimeout != 0 {
		return t.IdleConnTimeout
	}

	if t.t1 != nil {
		return t.t1.IdleConnTimeout
	}

	return 0
}

@@ -3202,3 +3399,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
		trace.GotFirstResponseByte()
	}
}

func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
	return trace != nil && trace.WroteHeaderField != nil
}

func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
	if trace != nil && trace.WroteHeaderField != nil {
		trace.WroteHeaderField(k, []string{v})
	}
}

func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
	if trace != nil {
		return trace.Got1xxResponse
	}
	return nil
}

// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
// connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
	dialer := &tls.Dialer{
		Config: cfg,
	}
	cn, err := dialer.DialContext(ctx, network, addr)
	if err != nil {
		return nil, err
	}
	tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
	return tlsCn, nil
}

1
vendor/golang.org/x/net/idna/go118.go
generated
vendored
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.

//go:build go1.18
// +build go1.18

package idna

Some files were not shown because too many files have changed in this diff