feat: Waku v2 bridge

Issue #12610
Michal Iskierko
2023-11-12 13:29:38 +01:00
parent 56e7bd01ca
commit 6d31343205
6716 changed files with 1982502 additions and 5891 deletions

20
vendor/github.com/afex/hystrix-go/LICENSE generated vendored Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 keith
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

196
vendor/github.com/afex/hystrix-go/hystrix/circuit.go generated vendored Normal file

@@ -0,0 +1,196 @@
package hystrix
import (
"fmt"
"sync"
"sync/atomic"
"time"
)
// CircuitBreaker is created for each ExecutorPool to track whether requests
// should be attempted, or rejected if the Health of the circuit is too low.
type CircuitBreaker struct {
Name string
open bool
forceOpen bool
mutex *sync.RWMutex
openedOrLastTestedTime int64
executorPool *executorPool
metrics *metricExchange
}
var (
circuitBreakersMutex *sync.RWMutex
circuitBreakers map[string]*CircuitBreaker
)
func init() {
circuitBreakersMutex = &sync.RWMutex{}
circuitBreakers = make(map[string]*CircuitBreaker)
}
// GetCircuit returns the circuit for the given command and whether this call created it.
func GetCircuit(name string) (*CircuitBreaker, bool, error) {
circuitBreakersMutex.RLock()
_, ok := circuitBreakers[name]
if !ok {
circuitBreakersMutex.RUnlock()
circuitBreakersMutex.Lock()
defer circuitBreakersMutex.Unlock()
// because we released the rlock before we obtained the exclusive lock,
// we need to double check that some other thread didn't beat us to
// creation.
if cb, ok := circuitBreakers[name]; ok {
return cb, false, nil
}
circuitBreakers[name] = newCircuitBreaker(name)
} else {
defer circuitBreakersMutex.RUnlock()
}
return circuitBreakers[name], !ok, nil
}
// Flush purges all circuit and metric information from memory.
func Flush() {
circuitBreakersMutex.Lock()
defer circuitBreakersMutex.Unlock()
for name, cb := range circuitBreakers {
cb.metrics.Reset()
cb.executorPool.Metrics.Reset()
delete(circuitBreakers, name)
}
}
// newCircuitBreaker creates a CircuitBreaker with associated Health
func newCircuitBreaker(name string) *CircuitBreaker {
c := &CircuitBreaker{}
c.Name = name
c.metrics = newMetricExchange(name)
c.executorPool = newExecutorPool(name)
c.mutex = &sync.RWMutex{}
return c
}
// toggleForceOpen allows manually causing the fallback logic for all instances
// of a given command.
func (circuit *CircuitBreaker) toggleForceOpen(toggle bool) error {
circuit, _, err := GetCircuit(circuit.Name)
if err != nil {
return err
}
circuit.forceOpen = toggle
return nil
}
// IsOpen is called before any Command execution to check whether or
// not it should be attempted. An "open" circuit means it is disabled.
func (circuit *CircuitBreaker) IsOpen() bool {
circuit.mutex.RLock()
o := circuit.forceOpen || circuit.open
circuit.mutex.RUnlock()
if o {
return true
}
if uint64(circuit.metrics.Requests().Sum(time.Now())) < getSettings(circuit.Name).RequestVolumeThreshold {
return false
}
if !circuit.metrics.IsHealthy(time.Now()) {
// too many failures, open the circuit
circuit.setOpen()
return true
}
return false
}
// AllowRequest is checked before a command executes, ensuring that circuit state and metric health allow it.
// When the circuit is open, this call will occasionally return true to measure whether the external service
// has recovered.
func (circuit *CircuitBreaker) AllowRequest() bool {
return !circuit.IsOpen() || circuit.allowSingleTest()
}
func (circuit *CircuitBreaker) allowSingleTest() bool {
circuit.mutex.RLock()
defer circuit.mutex.RUnlock()
now := time.Now().UnixNano()
openedOrLastTestedTime := atomic.LoadInt64(&circuit.openedOrLastTestedTime)
if circuit.open && now > openedOrLastTestedTime+getSettings(circuit.Name).SleepWindow.Nanoseconds() {
swapped := atomic.CompareAndSwapInt64(&circuit.openedOrLastTestedTime, openedOrLastTestedTime, now)
if swapped {
log.Printf("hystrix-go: allowing single test to possibly close circuit %v", circuit.Name)
}
return swapped
}
return false
}
func (circuit *CircuitBreaker) setOpen() {
circuit.mutex.Lock()
defer circuit.mutex.Unlock()
if circuit.open {
return
}
log.Printf("hystrix-go: opening circuit %v", circuit.Name)
circuit.openedOrLastTestedTime = time.Now().UnixNano()
circuit.open = true
}
func (circuit *CircuitBreaker) setClose() {
circuit.mutex.Lock()
defer circuit.mutex.Unlock()
if !circuit.open {
return
}
log.Printf("hystrix-go: closing circuit %v", circuit.Name)
circuit.open = false
circuit.metrics.Reset()
}
// ReportEvent records command metrics for tracking recent error rates and exposing data to the dashboard.
func (circuit *CircuitBreaker) ReportEvent(eventTypes []string, start time.Time, runDuration time.Duration) error {
if len(eventTypes) == 0 {
return fmt.Errorf("no event types sent for metrics")
}
circuit.mutex.RLock()
o := circuit.open
circuit.mutex.RUnlock()
if eventTypes[0] == "success" && o {
circuit.setClose()
}
var concurrencyInUse float64
if circuit.executorPool.Max > 0 {
concurrencyInUse = float64(circuit.executorPool.ActiveCount()) / float64(circuit.executorPool.Max)
}
select {
case circuit.metrics.Updates <- &commandExecution{
Types: eventTypes,
Start: start,
RunDuration: runDuration,
ConcurrencyInUse: concurrencyInUse,
}:
default:
return CircuitError{Message: fmt.Sprintf("metrics channel (%v) is at capacity", circuit.Name)}
}
return nil
}
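Reviewer note: a minimal sketch of how the breaker API above fits together; the command name and calling code are assumptions for illustration, not part of this commit.

package main

import (
	"fmt"
	"time"

	"github.com/afex/hystrix-go/hystrix"
)

func main() {
	// GetCircuit lazily creates a breaker the first time a name is seen.
	cb, created, err := hystrix.GetCircuit("example_command") // name assumed
	if err != nil {
		panic(err)
	}
	fmt.Println("created:", created) // true on first call

	// AllowRequest combines IsOpen with the occasional single-test probe.
	if cb.AllowRequest() {
		start := time.Now()
		// ... call the remote dependency here ...
		_ = cb.ReportEvent([]string{"success"}, start, time.Since(start))
	}
}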

79
vendor/github.com/afex/hystrix-go/hystrix/doc.go generated vendored Normal file

@@ -0,0 +1,79 @@
/*
Package hystrix is a latency and fault tolerance library designed to isolate
points of access to remote systems, services and 3rd party libraries, stop
cascading failure and enable resilience in complex distributed systems where
failure is inevitable.
Based on the Java project of the same name, by Netflix: https://github.com/Netflix/Hystrix
Execute code as a Hystrix command
Define your application logic which relies on external systems, passing your function to Go. When that system is healthy this will be the only thing which executes.
hystrix.Go("my_command", func() error {
// talk to other services
return nil
}, nil)
Defining fallback behavior
If you want code to execute during a service outage, pass in a second function to Go. Ideally, the logic here will allow your application to gracefully handle external services being unavailable.
This triggers when your code returns an error, or whenever it is unable to complete based on a variety of health checks https://github.com/Netflix/Hystrix/wiki/How-it-Works.
hystrix.Go("my_command", func() error {
// talk to other services
return nil
}, func(err error) error {
// do this when services are down
return nil
})
Waiting for output
Calling Go is like launching a goroutine, except you receive a channel of errors you can choose to monitor.
output := make(chan bool, 1)
errors := hystrix.Go("my_command", func() error {
// talk to other services
output <- true
return nil
}, nil)
select {
case out := <-output:
// success
case err := <-errors:
// failure
}
Synchronous API
Since calling a command and immediately waiting for it to finish is a common pattern, a synchronous API is available with the Do function which returns a single error.
err := hystrix.Do("my_command", func() error {
// talk to other services
return nil
}, nil)
Configure settings
During application boot, you can call ConfigureCommand to tweak the settings for each command.
hystrix.ConfigureCommand("my_command", hystrix.CommandConfig{
Timeout: 1000,
MaxConcurrentRequests: 100,
ErrorPercentThreshold: 25,
})
You can also use Configure which accepts a map[string]CommandConfig.
Enable dashboard metrics
In your main.go, register the event stream HTTP handler on a port and launch it in a goroutine. Once you configure turbine for your Hystrix Dashboard https://github.com/Netflix/Hystrix/tree/master/hystrix-dashboard to start streaming events, your commands will automatically begin appearing.
hystrixStreamHandler := hystrix.NewStreamHandler()
hystrixStreamHandler.Start()
go http.ListenAndServe(net.JoinHostPort("", "81"), hystrixStreamHandler)
*/
package hystrix
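For reference, the synchronous snippet from the doc comment above wrapped into a runnable program; the failing service call and the printed output are illustrative assumptions.

package main

import (
	"errors"
	"fmt"

	"github.com/afex/hystrix-go/hystrix"
)

func main() {
	hystrix.ConfigureCommand("my_command", hystrix.CommandConfig{
		Timeout:               1000, // milliseconds
		MaxConcurrentRequests: 100,
		ErrorPercentThreshold: 25,
	})

	err := hystrix.Do("my_command", func() error {
		// talk to other services; an error here triggers the fallback
		return errors.New("service unavailable") // assumed failure
	}, func(err error) error {
		// degrade gracefully while the dependency is down
		fmt.Println("fallback ran after:", err)
		return nil
	})
	fmt.Println("Do returned:", err) // nil, because the fallback succeeded
}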

326
vendor/github.com/afex/hystrix-go/hystrix/eventstream.go generated vendored Normal file

@@ -0,0 +1,326 @@
package hystrix
import (
"bytes"
"encoding/json"
"net/http"
"sync"
"time"
"github.com/afex/hystrix-go/hystrix/rolling"
)
const (
streamEventBufferSize = 10
)
// NewStreamHandler returns a server capable of exposing dashboard metrics via HTTP.
func NewStreamHandler() *StreamHandler {
return &StreamHandler{}
}
// StreamHandler publishes metrics for each command and each pool once a second to all connected HTTP clients.
type StreamHandler struct {
requests map[*http.Request]chan []byte
mu sync.RWMutex
done chan struct{}
}
// Start begins watching the in-memory circuit breakers for metrics
func (sh *StreamHandler) Start() {
sh.requests = make(map[*http.Request]chan []byte)
sh.done = make(chan struct{})
go sh.loop()
}
// Stop shuts down the metric collection routine
func (sh *StreamHandler) Stop() {
close(sh.done)
}
var _ http.Handler = (*StreamHandler)(nil)
func (sh *StreamHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
// Make sure that the writer supports flushing.
f, ok := rw.(http.Flusher)
if !ok {
http.Error(rw, "Streaming unsupported!", http.StatusInternalServerError)
return
}
events := sh.register(req)
defer sh.unregister(req)
notify := rw.(http.CloseNotifier).CloseNotify()
rw.Header().Add("Content-Type", "text/event-stream")
rw.Header().Set("Cache-Control", "no-cache")
rw.Header().Set("Connection", "keep-alive")
for {
select {
case <-notify:
// client is gone
return
case event := <-events:
_, err := rw.Write(event)
if err != nil {
return
}
f.Flush()
}
}
}
func (sh *StreamHandler) loop() {
tick := time.Tick(1 * time.Second)
for {
select {
case <-tick:
circuitBreakersMutex.RLock()
for _, cb := range circuitBreakers {
sh.publishMetrics(cb)
sh.publishThreadPools(cb.executorPool)
}
circuitBreakersMutex.RUnlock()
case <-sh.done:
return
}
}
}
func (sh *StreamHandler) publishMetrics(cb *CircuitBreaker) error {
now := time.Now()
reqCount := cb.metrics.Requests().Sum(now)
errCount := cb.metrics.DefaultCollector().Errors().Sum(now)
errPct := cb.metrics.ErrorPercent(now)
eventBytes, err := json.Marshal(&streamCmdMetric{
Type: "HystrixCommand",
Name: cb.Name,
Group: cb.Name,
Time: currentTime(),
ReportingHosts: 1,
RequestCount: uint32(reqCount),
ErrorCount: uint32(errCount),
ErrorPct: uint32(errPct),
CircuitBreakerOpen: cb.IsOpen(),
RollingCountSuccess: uint32(cb.metrics.DefaultCollector().Successes().Sum(now)),
RollingCountFailure: uint32(cb.metrics.DefaultCollector().Failures().Sum(now)),
RollingCountThreadPoolRejected: uint32(cb.metrics.DefaultCollector().Rejects().Sum(now)),
RollingCountShortCircuited: uint32(cb.metrics.DefaultCollector().ShortCircuits().Sum(now)),
RollingCountTimeout: uint32(cb.metrics.DefaultCollector().Timeouts().Sum(now)),
RollingCountFallbackSuccess: uint32(cb.metrics.DefaultCollector().FallbackSuccesses().Sum(now)),
RollingCountFallbackFailure: uint32(cb.metrics.DefaultCollector().FallbackFailures().Sum(now)),
LatencyTotal: generateLatencyTimings(cb.metrics.DefaultCollector().TotalDuration()),
LatencyTotalMean: cb.metrics.DefaultCollector().TotalDuration().Mean(),
LatencyExecute: generateLatencyTimings(cb.metrics.DefaultCollector().RunDuration()),
LatencyExecuteMean: cb.metrics.DefaultCollector().RunDuration().Mean(),
// TODO: all hard-coded values should become configurable settings, per circuit
RollingStatsWindow: 10000,
ExecutionIsolationStrategy: "THREAD",
CircuitBreakerEnabled: true,
CircuitBreakerForceClosed: false,
CircuitBreakerForceOpen: cb.forceOpen,
CircuitBreakerErrorThresholdPercent: uint32(getSettings(cb.Name).ErrorPercentThreshold),
CircuitBreakerSleepWindow: uint32(getSettings(cb.Name).SleepWindow.Seconds() * 1000),
CircuitBreakerRequestVolumeThreshold: uint32(getSettings(cb.Name).RequestVolumeThreshold),
})
if err != nil {
return err
}
return sh.writeToRequests(eventBytes)
}
func (sh *StreamHandler) publishThreadPools(pool *executorPool) error {
now := time.Now()
eventBytes, err := json.Marshal(&streamThreadPoolMetric{
Type: "HystrixThreadPool",
Name: pool.Name,
ReportingHosts: 1,
CurrentActiveCount: uint32(pool.ActiveCount()),
CurrentTaskCount: 0,
CurrentCompletedTaskCount: 0,
RollingCountThreadsExecuted: uint32(pool.Metrics.Executed.Sum(now)),
RollingMaxActiveThreads: uint32(pool.Metrics.MaxActiveRequests.Max(now)),
CurrentPoolSize: uint32(pool.Max),
CurrentCorePoolSize: uint32(pool.Max),
CurrentLargestPoolSize: uint32(pool.Max),
CurrentMaximumPoolSize: uint32(pool.Max),
RollingStatsWindow: 10000,
QueueSizeRejectionThreshold: 0,
CurrentQueueSize: 0,
})
if err != nil {
return err
}
return sh.writeToRequests(eventBytes)
}
func (sh *StreamHandler) writeToRequests(eventBytes []byte) error {
var b bytes.Buffer
_, err := b.Write([]byte("data:"))
if err != nil {
return err
}
_, err = b.Write(eventBytes)
if err != nil {
return err
}
_, err = b.Write([]byte("\n\n"))
if err != nil {
return err
}
dataBytes := b.Bytes()
sh.mu.RLock()
for _, requestEvents := range sh.requests {
select {
case requestEvents <- dataBytes:
default:
}
}
sh.mu.RUnlock()
return nil
}
func (sh *StreamHandler) register(req *http.Request) <-chan []byte {
sh.mu.RLock()
events, ok := sh.requests[req]
sh.mu.RUnlock()
if ok {
return events
}
events = make(chan []byte, streamEventBufferSize)
sh.mu.Lock()
sh.requests[req] = events
sh.mu.Unlock()
return events
}
func (sh *StreamHandler) unregister(req *http.Request) {
sh.mu.Lock()
delete(sh.requests, req)
sh.mu.Unlock()
}
func generateLatencyTimings(r *rolling.Timing) streamCmdLatency {
return streamCmdLatency{
Timing0: r.Percentile(0),
Timing25: r.Percentile(25),
Timing50: r.Percentile(50),
Timing75: r.Percentile(75),
Timing90: r.Percentile(90),
Timing95: r.Percentile(95),
Timing99: r.Percentile(99),
Timing995: r.Percentile(99.5),
Timing100: r.Percentile(100),
}
}
type streamCmdMetric struct {
Type string `json:"type"`
Name string `json:"name"`
Group string `json:"group"`
Time int64 `json:"currentTime"`
ReportingHosts uint32 `json:"reportingHosts"`
// Health
RequestCount uint32 `json:"requestCount"`
ErrorCount uint32 `json:"errorCount"`
ErrorPct uint32 `json:"errorPercentage"`
CircuitBreakerOpen bool `json:"isCircuitBreakerOpen"`
RollingCountCollapsedRequests uint32 `json:"rollingCountCollapsedRequests"`
RollingCountExceptionsThrown uint32 `json:"rollingCountExceptionsThrown"`
RollingCountFailure uint32 `json:"rollingCountFailure"`
RollingCountFallbackFailure uint32 `json:"rollingCountFallbackFailure"`
RollingCountFallbackRejection uint32 `json:"rollingCountFallbackRejection"`
RollingCountFallbackSuccess uint32 `json:"rollingCountFallbackSuccess"`
RollingCountResponsesFromCache uint32 `json:"rollingCountResponsesFromCache"`
RollingCountSemaphoreRejected uint32 `json:"rollingCountSemaphoreRejected"`
RollingCountShortCircuited uint32 `json:"rollingCountShortCircuited"`
RollingCountSuccess uint32 `json:"rollingCountSuccess"`
RollingCountThreadPoolRejected uint32 `json:"rollingCountThreadPoolRejected"`
RollingCountTimeout uint32 `json:"rollingCountTimeout"`
CurrentConcurrentExecutionCount uint32 `json:"currentConcurrentExecutionCount"`
LatencyExecuteMean uint32 `json:"latencyExecute_mean"`
LatencyExecute streamCmdLatency `json:"latencyExecute"`
LatencyTotalMean uint32 `json:"latencyTotal_mean"`
LatencyTotal streamCmdLatency `json:"latencyTotal"`
// Properties
CircuitBreakerRequestVolumeThreshold uint32 `json:"propertyValue_circuitBreakerRequestVolumeThreshold"`
CircuitBreakerSleepWindow uint32 `json:"propertyValue_circuitBreakerSleepWindowInMilliseconds"`
CircuitBreakerErrorThresholdPercent uint32 `json:"propertyValue_circuitBreakerErrorThresholdPercentage"`
CircuitBreakerForceOpen bool `json:"propertyValue_circuitBreakerForceOpen"`
CircuitBreakerForceClosed bool `json:"propertyValue_circuitBreakerForceClosed"`
CircuitBreakerEnabled bool `json:"propertyValue_circuitBreakerEnabled"`
ExecutionIsolationStrategy string `json:"propertyValue_executionIsolationStrategy"`
ExecutionIsolationThreadTimeout uint32 `json:"propertyValue_executionIsolationThreadTimeoutInMilliseconds"`
ExecutionIsolationThreadInterruptOnTimeout bool `json:"propertyValue_executionIsolationThreadInterruptOnTimeout"`
ExecutionIsolationThreadPoolKeyOverride string `json:"propertyValue_executionIsolationThreadPoolKeyOverride"`
ExecutionIsolationSemaphoreMaxConcurrentRequests uint32 `json:"propertyValue_executionIsolationSemaphoreMaxConcurrentRequests"`
FallbackIsolationSemaphoreMaxConcurrentRequests uint32 `json:"propertyValue_fallbackIsolationSemaphoreMaxConcurrentRequests"`
RollingStatsWindow uint32 `json:"propertyValue_metricsRollingStatisticalWindowInMilliseconds"`
RequestCacheEnabled bool `json:"propertyValue_requestCacheEnabled"`
RequestLogEnabled bool `json:"propertyValue_requestLogEnabled"`
}
type streamCmdLatency struct {
Timing0 uint32 `json:"0"`
Timing25 uint32 `json:"25"`
Timing50 uint32 `json:"50"`
Timing75 uint32 `json:"75"`
Timing90 uint32 `json:"90"`
Timing95 uint32 `json:"95"`
Timing99 uint32 `json:"99"`
Timing995 uint32 `json:"99.5"`
Timing100 uint32 `json:"100"`
}
type streamThreadPoolMetric struct {
Type string `json:"type"`
Name string `json:"name"`
ReportingHosts uint32 `json:"reportingHosts"`
CurrentActiveCount uint32 `json:"currentActiveCount"`
CurrentCompletedTaskCount uint32 `json:"currentCompletedTaskCount"`
CurrentCorePoolSize uint32 `json:"currentCorePoolSize"`
CurrentLargestPoolSize uint32 `json:"currentLargestPoolSize"`
CurrentMaximumPoolSize uint32 `json:"currentMaximumPoolSize"`
CurrentPoolSize uint32 `json:"currentPoolSize"`
CurrentQueueSize uint32 `json:"currentQueueSize"`
CurrentTaskCount uint32 `json:"currentTaskCount"`
RollingMaxActiveThreads uint32 `json:"rollingMaxActiveThreads"`
RollingCountThreadsExecuted uint32 `json:"rollingCountThreadsExecuted"`
RollingStatsWindow uint32 `json:"propertyValue_metricsRollingStatisticalWindowInMilliseconds"`
QueueSizeRejectionThreshold uint32 `json:"propertyValue_queueSizeRejectionThreshold"`
}
func currentTime() int64 {
return time.Now().UnixNano() / int64(1000000)
}
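A sketch of wiring the stream handler above into a server; the port matches the doc comment's suggestion, and the frame shape in the comment follows writeToRequests.

package main

import (
	"net/http"

	"github.com/afex/hystrix-go/hystrix"
)

func main() {
	sh := hystrix.NewStreamHandler()
	sh.Start()
	defer sh.Stop()
	// Every second, each connected client receives one SSE frame per command
	// and per pool, framed by writeToRequests as:
	//   data:{"type":"HystrixCommand","name":"...",...}\n\n
	_ = http.ListenAndServe(":81", sh)
}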

299
vendor/github.com/afex/hystrix-go/hystrix/hystrix.go generated vendored Normal file

@@ -0,0 +1,299 @@
package hystrix
import (
"context"
"fmt"
"sync"
"time"
)
type runFunc func() error
type fallbackFunc func(error) error
type runFuncC func(context.Context) error
type fallbackFuncC func(context.Context, error) error
// A CircuitError is an error which models various failure states of execution,
// such as the circuit being open or a timeout.
type CircuitError struct {
Message string
}
func (e CircuitError) Error() string {
return "hystrix: " + e.Message
}
// command models the state used for a single execution on a circuit. "hystrix command" is commonly
// used to describe the pairing of your run/fallback functions with a circuit.
type command struct {
sync.Mutex
ticket *struct{}
start time.Time
errChan chan error
finished chan bool
circuit *CircuitBreaker
run runFuncC
fallback fallbackFuncC
runDuration time.Duration
events []string
}
var (
// ErrMaxConcurrency occurs when too many of the same named command are executed at the same time.
ErrMaxConcurrency = CircuitError{Message: "max concurrency"}
// ErrCircuitOpen returns when an execution attempt "short circuits". This happens due to the circuit being measured as unhealthy.
ErrCircuitOpen = CircuitError{Message: "circuit open"}
// ErrTimeout occurs when the provided function takes too long to execute.
ErrTimeout = CircuitError{Message: "timeout"}
)
// Go runs your function while tracking the health of previous calls to it.
// If your function begins slowing down or failing repeatedly, we will block
// new calls to it for you to give the dependent service time to repair.
//
// Define a fallback function if you want to define some code to execute during outages.
func Go(name string, run runFunc, fallback fallbackFunc) chan error {
runC := func(ctx context.Context) error {
return run()
}
var fallbackC fallbackFuncC
if fallback != nil {
fallbackC = func(ctx context.Context, err error) error {
return fallback(err)
}
}
return GoC(context.Background(), name, runC, fallbackC)
}
// GoC runs your function while tracking the health of previous calls to it.
// If your function begins slowing down or failing repeatedly, we will block
// new calls to it for you to give the dependent service time to repair.
//
// Define a fallback function if you want to define some code to execute during outages.
func GoC(ctx context.Context, name string, run runFuncC, fallback fallbackFuncC) chan error {
cmd := &command{
run: run,
fallback: fallback,
start: time.Now(),
errChan: make(chan error, 1),
finished: make(chan bool, 1),
}
// Don't use methods with explicit params and returns; let data come in
// and out naturally, as with any closure. The explicit error return gives
// us a place to kill-switch the operation (fallback).
circuit, _, err := GetCircuit(name)
if err != nil {
cmd.errChan <- err
return cmd.errChan
}
cmd.circuit = circuit
ticketCond := sync.NewCond(cmd)
ticketChecked := false
// When the caller extracts the error from the returned errChan, it's assumed
// that the ticket has been returned to the executorPool. Therefore,
// returnTicket() cannot run after cmd.errorWithFallback().
returnTicket := func() {
cmd.Lock()
// Avoid releasing before a ticket is acquired.
for !ticketChecked {
ticketCond.Wait()
}
cmd.circuit.executorPool.Return(cmd.ticket)
cmd.Unlock()
}
// Shared by the following two goroutines. It ensures only the faster
// goroutine runs errorWithFallback() and reportAllEvent().
returnOnce := &sync.Once{}
reportAllEvent := func() {
err := cmd.circuit.ReportEvent(cmd.events, cmd.start, cmd.runDuration)
if err != nil {
log.Printf("%v", err)
}
}
go func() {
defer func() { cmd.finished <- true }()
// Circuits get opened when recent executions have shown a high error rate.
// Rejecting new executions allows backends to recover, and the circuit will
// allow new traffic when it detects that a healthy state has returned.
if !cmd.circuit.AllowRequest() {
cmd.Lock()
// It's safe for another goroutine to go ahead releasing a nil ticket.
ticketChecked = true
ticketCond.Signal()
cmd.Unlock()
returnOnce.Do(func() {
returnTicket()
cmd.errorWithFallback(ctx, ErrCircuitOpen)
reportAllEvent()
})
return
}
// As backends falter, requests take longer but don't always fail.
//
// When requests slow down but the incoming rate of requests stays the same, you have to
// run more at a time to keep up. By controlling concurrency during these situations, you can
// shed load which accumulates due to the increasing ratio of active commands to incoming requests.
cmd.Lock()
select {
case cmd.ticket = <-circuit.executorPool.Tickets:
ticketChecked = true
ticketCond.Signal()
cmd.Unlock()
default:
ticketChecked = true
ticketCond.Signal()
cmd.Unlock()
returnOnce.Do(func() {
returnTicket()
cmd.errorWithFallback(ctx, ErrMaxConcurrency)
reportAllEvent()
})
return
}
runStart := time.Now()
runErr := run(ctx)
returnOnce.Do(func() {
defer reportAllEvent()
cmd.runDuration = time.Since(runStart)
returnTicket()
if runErr != nil {
cmd.errorWithFallback(ctx, runErr)
return
}
cmd.reportEvent("success")
})
}()
go func() {
timer := time.NewTimer(getSettings(name).Timeout)
defer timer.Stop()
select {
case <-cmd.finished:
// returnOnce has been executed in another goroutine
case <-ctx.Done():
returnOnce.Do(func() {
returnTicket()
cmd.errorWithFallback(ctx, ctx.Err())
reportAllEvent()
})
return
case <-timer.C:
returnOnce.Do(func() {
returnTicket()
cmd.errorWithFallback(ctx, ErrTimeout)
reportAllEvent()
})
return
}
}()
return cmd.errChan
}
// Do runs your function in a synchronous manner, blocking until either your function succeeds
// or an error is returned, including hystrix circuit errors
func Do(name string, run runFunc, fallback fallbackFunc) error {
runC := func(ctx context.Context) error {
return run()
}
var fallbackC fallbackFuncC
if fallback != nil {
fallbackC = func(ctx context.Context, err error) error {
return fallback(err)
}
}
return DoC(context.Background(), name, runC, fallbackC)
}
// DoC runs your function in a synchronous manner, blocking until either your function succeeds
// or an error is returned, including hystrix circuit errors
func DoC(ctx context.Context, name string, run runFuncC, fallback fallbackFuncC) error {
done := make(chan struct{}, 1)
r := func(ctx context.Context) error {
err := run(ctx)
if err != nil {
return err
}
done <- struct{}{}
return nil
}
f := func(ctx context.Context, e error) error {
err := fallback(ctx, e)
if err != nil {
return err
}
done <- struct{}{}
return nil
}
var errChan chan error
if fallback == nil {
errChan = GoC(ctx, name, r, nil)
} else {
errChan = GoC(ctx, name, r, f)
}
select {
case <-done:
return nil
case err := <-errChan:
return err
}
}
func (c *command) reportEvent(eventType string) {
c.Lock()
defer c.Unlock()
c.events = append(c.events, eventType)
}
// errorWithFallback triggers the fallback while reporting the appropriate metric events.
func (c *command) errorWithFallback(ctx context.Context, err error) {
eventType := "failure"
if err == ErrCircuitOpen {
eventType = "short-circuit"
} else if err == ErrMaxConcurrency {
eventType = "rejected"
} else if err == ErrTimeout {
eventType = "timeout"
} else if err == context.Canceled {
eventType = "context_canceled"
} else if err == context.DeadlineExceeded {
eventType = "context_deadline_exceeded"
}
c.reportEvent(eventType)
fallbackErr := c.tryFallback(ctx, err)
if fallbackErr != nil {
c.errChan <- fallbackErr
}
}
func (c *command) tryFallback(ctx context.Context, err error) error {
if c.fallback == nil {
// If we don't have a fallback return the original error.
return err
}
fallbackErr := c.fallback(ctx, err)
if fallbackErr != nil {
c.reportEvent("fallback-failure")
return fmt.Errorf("fallback failed with '%v'. run error was '%v'", fallbackErr, err)
}
c.reportEvent("fallback-success")
return nil
}
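A sketch of the context-aware path (GoC/DoC) above; the command name and the artificial delay are assumptions for illustration.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/afex/hystrix-go/hystrix"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	errChan := hystrix.GoC(ctx, "slow_command", func(ctx context.Context) error {
		select {
		case <-time.After(time.Second): // slower than the context allows
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}, nil)

	// With no fallback, the context error surfaces on the channel and is
	// recorded as a "context_deadline_exceeded" event by errorWithFallback.
	fmt.Println(<-errChan) // context deadline exceeded
}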

11
vendor/github.com/afex/hystrix-go/hystrix/logger.go generated vendored Normal file

@@ -0,0 +1,11 @@
package hystrix
type logger interface {
Printf(format string, items ...interface{})
}
// NoopLogger does not log anything.
type NoopLogger struct{}
// Printf does nothing.
func (l NoopLogger) Printf(format string, items ...interface{}) {}
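The logger interface above is satisfied by the standard library's *log.Logger; a sketch replacing the NoopLogger default (SetLogger lives in settings.go below).

package main

import (
	"log"
	"os"

	"github.com/afex/hystrix-go/hystrix"
)

func main() {
	// *log.Logger provides Printf, so it satisfies the logger interface;
	// circuit open/close transitions will now be written to stderr.
	hystrix.SetLogger(log.New(os.Stderr, "hystrix ", log.LstdFlags))
}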

169
vendor/github.com/afex/hystrix-go/hystrix/metric_collector/default_metric_collector.go generated vendored Normal file

@@ -0,0 +1,169 @@
package metricCollector
import (
"sync"
"github.com/afex/hystrix-go/hystrix/rolling"
)
// DefaultMetricCollector holds information about the circuit state.
// This implementation of MetricCollector is the canonical source of information about the circuit.
// It is used for all internal hystrix operations
// including circuit health checks and metrics sent to the hystrix dashboard.
//
// Metric Collectors do not need Mutexes as they are updated by circuits within a locked context.
type DefaultMetricCollector struct {
mutex *sync.RWMutex
numRequests *rolling.Number
errors *rolling.Number
successes *rolling.Number
failures *rolling.Number
rejects *rolling.Number
shortCircuits *rolling.Number
timeouts *rolling.Number
contextCanceled *rolling.Number
contextDeadlineExceeded *rolling.Number
fallbackSuccesses *rolling.Number
fallbackFailures *rolling.Number
totalDuration *rolling.Timing
runDuration *rolling.Timing
}
func newDefaultMetricCollector(name string) MetricCollector {
m := &DefaultMetricCollector{}
m.mutex = &sync.RWMutex{}
m.Reset()
return m
}
// NumRequests returns the rolling number of requests
func (d *DefaultMetricCollector) NumRequests() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.numRequests
}
// Errors returns the rolling number of errors
func (d *DefaultMetricCollector) Errors() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.errors
}
// Successes returns the rolling number of successes
func (d *DefaultMetricCollector) Successes() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.successes
}
// Failures returns the rolling number of failures
func (d *DefaultMetricCollector) Failures() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.failures
}
// Rejects returns the rolling number of rejects
func (d *DefaultMetricCollector) Rejects() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.rejects
}
// ShortCircuits returns the rolling number of short circuits
func (d *DefaultMetricCollector) ShortCircuits() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.shortCircuits
}
// Timeouts returns the rolling number of timeouts
func (d *DefaultMetricCollector) Timeouts() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.timeouts
}
// FallbackSuccesses returns the rolling number of fallback successes
func (d *DefaultMetricCollector) FallbackSuccesses() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.fallbackSuccesses
}
func (d *DefaultMetricCollector) ContextCanceled() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.contextCanceled
}
func (d *DefaultMetricCollector) ContextDeadlineExceeded() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.contextDeadlineExceeded
}
// FallbackFailures returns the rolling number of fallback failures
func (d *DefaultMetricCollector) FallbackFailures() *rolling.Number {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.fallbackFailures
}
// TotalDuration returns the rolling total duration
func (d *DefaultMetricCollector) TotalDuration() *rolling.Timing {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.totalDuration
}
// RunDuration returns the rolling run duration
func (d *DefaultMetricCollector) RunDuration() *rolling.Timing {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.runDuration
}
func (d *DefaultMetricCollector) Update(r MetricResult) {
d.mutex.RLock()
defer d.mutex.RUnlock()
d.numRequests.Increment(r.Attempts)
d.errors.Increment(r.Errors)
d.successes.Increment(r.Successes)
d.failures.Increment(r.Failures)
d.rejects.Increment(r.Rejects)
d.shortCircuits.Increment(r.ShortCircuits)
d.timeouts.Increment(r.Timeouts)
d.fallbackSuccesses.Increment(r.FallbackSuccesses)
d.fallbackFailures.Increment(r.FallbackFailures)
d.contextCanceled.Increment(r.ContextCanceled)
d.contextDeadlineExceeded.Increment(r.ContextDeadlineExceeded)
d.totalDuration.Add(r.TotalDuration)
d.runDuration.Add(r.RunDuration)
}
// Reset resets all metrics in this collector to 0.
func (d *DefaultMetricCollector) Reset() {
d.mutex.Lock()
defer d.mutex.Unlock()
d.numRequests = rolling.NewNumber()
d.errors = rolling.NewNumber()
d.successes = rolling.NewNumber()
d.rejects = rolling.NewNumber()
d.shortCircuits = rolling.NewNumber()
d.failures = rolling.NewNumber()
d.timeouts = rolling.NewNumber()
d.fallbackSuccesses = rolling.NewNumber()
d.fallbackFailures = rolling.NewNumber()
d.contextCanceled = rolling.NewNumber()
d.contextDeadlineExceeded = rolling.NewNumber()
d.totalDuration = rolling.NewTiming()
d.runDuration = rolling.NewTiming()
}

67
vendor/github.com/afex/hystrix-go/hystrix/metric_collector/metric_collector.go generated vendored Normal file

@@ -0,0 +1,67 @@
package metricCollector
import (
"sync"
"time"
)
// Registry is the default metricCollectorRegistry that circuits will use to
// collect statistics about the health of the circuit.
var Registry = metricCollectorRegistry{
lock: &sync.RWMutex{},
registry: []func(name string) MetricCollector{
newDefaultMetricCollector,
},
}
type metricCollectorRegistry struct {
lock *sync.RWMutex
registry []func(name string) MetricCollector
}
// InitializeMetricCollectors runs the registered MetricCollector Initializers to create an array of MetricCollectors.
func (m *metricCollectorRegistry) InitializeMetricCollectors(name string) []MetricCollector {
m.lock.RLock()
defer m.lock.RUnlock()
metrics := make([]MetricCollector, len(m.registry))
for i, metricCollectorInitializer := range m.registry {
metrics[i] = metricCollectorInitializer(name)
}
return metrics
}
// Register places a MetricCollector Initializer in the registry maintained by this metricCollectorRegistry.
func (m *metricCollectorRegistry) Register(initMetricCollector func(string) MetricCollector) {
m.lock.Lock()
defer m.lock.Unlock()
m.registry = append(m.registry, initMetricCollector)
}
type MetricResult struct {
Attempts float64
Errors float64
Successes float64
Failures float64
Rejects float64
ShortCircuits float64
Timeouts float64
FallbackSuccesses float64
FallbackFailures float64
ContextCanceled float64
ContextDeadlineExceeded float64
TotalDuration time.Duration
RunDuration time.Duration
ConcurrencyInUse float64
}
// MetricCollector represents the contract that all collectors must fulfill to gather circuit statistics.
// Implementations of this interface do not have to maintain locking around their data stores so long as
// they are not modified outside of the hystrix context.
type MetricCollector interface {
// Update accepts a set of metrics from a command execution for remote instrumentation
Update(MetricResult)
// Reset resets the internal counters and timers.
Reset()
}
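A sketch of plugging a custom collector into the Registry above; countingCollector is a hypothetical example type, not part of this commit.

package main

import (
	"fmt"

	metricCollector "github.com/afex/hystrix-go/hystrix/metric_collector"
)

// countingCollector is a hypothetical collector that only tallies attempts.
type countingCollector struct{ attempts float64 }

func (c *countingCollector) Update(r metricCollector.MetricResult) { c.attempts += r.Attempts }
func (c *countingCollector) Reset()                                { c.attempts = 0 }

func main() {
	// Registered initializers run once per circuit name, alongside the
	// default collector, so every execution is reported to both.
	metricCollector.Registry.Register(func(name string) metricCollector.MetricCollector {
		fmt.Println("collector created for circuit:", name)
		return &countingCollector{}
	})
}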

150
vendor/github.com/afex/hystrix-go/hystrix/metrics.go generated vendored Normal file

@@ -0,0 +1,150 @@
package hystrix
import (
"sync"
"time"
"github.com/afex/hystrix-go/hystrix/metric_collector"
"github.com/afex/hystrix-go/hystrix/rolling"
)
type commandExecution struct {
Types []string `json:"types"`
Start time.Time `json:"start_time"`
RunDuration time.Duration `json:"run_duration"`
ConcurrencyInUse float64 `json:"concurrency_inuse"`
}
type metricExchange struct {
Name string
Updates chan *commandExecution
Mutex *sync.RWMutex
metricCollectors []metricCollector.MetricCollector
}
func newMetricExchange(name string) *metricExchange {
m := &metricExchange{}
m.Name = name
m.Updates = make(chan *commandExecution, 2000)
m.Mutex = &sync.RWMutex{}
m.metricCollectors = metricCollector.Registry.InitializeMetricCollectors(name)
m.Reset()
go m.Monitor()
return m
}
// DefaultCollector returns the default metric collector; it panics if collectors are not set up to specification.
func (m *metricExchange) DefaultCollector() *metricCollector.DefaultMetricCollector {
if len(m.metricCollectors) < 1 {
panic("No Metric Collectors Registered.")
}
collection, ok := m.metricCollectors[0].(*metricCollector.DefaultMetricCollector)
if !ok {
panic("Default metric collector is not registered correctly. The default metric collector must be registered first.")
}
return collection
}
func (m *metricExchange) Monitor() {
for update := range m.Updates {
// we only grab a read lock to make sure Reset() isn't changing the numbers.
m.Mutex.RLock()
totalDuration := time.Since(update.Start)
wg := &sync.WaitGroup{}
for _, collector := range m.metricCollectors {
wg.Add(1)
go m.IncrementMetrics(wg, collector, update, totalDuration)
}
wg.Wait()
m.Mutex.RUnlock()
}
}
func (m *metricExchange) IncrementMetrics(wg *sync.WaitGroup, collector metricCollector.MetricCollector, update *commandExecution, totalDuration time.Duration) {
// granular metrics
r := metricCollector.MetricResult{
Attempts: 1,
TotalDuration: totalDuration,
RunDuration: update.RunDuration,
ConcurrencyInUse: update.ConcurrencyInUse,
}
switch update.Types[0] {
case "success":
r.Successes = 1
case "failure":
r.Failures = 1
r.Errors = 1
case "rejected":
r.Rejects = 1
r.Errors = 1
case "short-circuit":
r.ShortCircuits = 1
r.Errors = 1
case "timeout":
r.Timeouts = 1
r.Errors = 1
case "context_canceled":
r.ContextCanceled = 1
case "context_deadline_exceeded":
r.ContextDeadlineExceeded = 1
}
if len(update.Types) > 1 {
// fallback metrics
if update.Types[1] == "fallback-success" {
r.FallbackSuccesses = 1
}
if update.Types[1] == "fallback-failure" {
r.FallbackFailures = 1
}
}
collector.Update(r)
wg.Done()
}
func (m *metricExchange) Reset() {
m.Mutex.Lock()
defer m.Mutex.Unlock()
for _, collector := range m.metricCollectors {
collector.Reset()
}
}
func (m *metricExchange) Requests() *rolling.Number {
m.Mutex.RLock()
defer m.Mutex.RUnlock()
return m.requestsLocked()
}
func (m *metricExchange) requestsLocked() *rolling.Number {
return m.DefaultCollector().NumRequests()
}
func (m *metricExchange) ErrorPercent(now time.Time) int {
m.Mutex.RLock()
defer m.Mutex.RUnlock()
var errPct float64
reqs := m.requestsLocked().Sum(now)
errs := m.DefaultCollector().Errors().Sum(now)
if reqs > 0 {
errPct = (float64(errs) / float64(reqs)) * 100
}
return int(errPct + 0.5)
}
func (m *metricExchange) IsHealthy(now time.Time) bool {
return m.ErrorPercent(now) < getSettings(m.Name).ErrorPercentThreshold
}
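The rounding in ErrorPercent above is worth spelling out; a tiny standalone example of the same arithmetic (the request and error counts are made up).

package main

import "fmt"

func main() {
	// Same arithmetic as ErrorPercent: 5 errors over 20 requests.
	reqs, errs := 20.0, 5.0
	errPct := (errs / reqs) * 100
	fmt.Println(int(errPct + 0.5)) // 25 — healthy under the default 50% threshold
}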

37
vendor/github.com/afex/hystrix-go/hystrix/pool.go generated vendored Normal file

@@ -0,0 +1,37 @@
package hystrix
type executorPool struct {
Name string
Metrics *poolMetrics
Max int
Tickets chan *struct{}
}
func newExecutorPool(name string) *executorPool {
p := &executorPool{}
p.Name = name
p.Metrics = newPoolMetrics(name)
p.Max = getSettings(name).MaxConcurrentRequests
p.Tickets = make(chan *struct{}, p.Max)
for i := 0; i < p.Max; i++ {
p.Tickets <- &struct{}{}
}
return p
}
func (p *executorPool) Return(ticket *struct{}) {
if ticket == nil {
return
}
p.Metrics.Updates <- poolMetricsUpdate{
activeCount: p.ActiveCount(),
}
p.Tickets <- ticket
}
func (p *executorPool) ActiveCount() int {
return p.Max - len(p.Tickets)
}
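The ticket accounting above reduces to a buffered channel; a minimal standalone sketch of the same invariant (executorPool is unexported, so a plain channel stands in here).

package main

import "fmt"

func main() {
	// Same bookkeeping as executorPool: Max tickets in a buffered channel.
	max := 10
	tickets := make(chan struct{}, max)
	for i := 0; i < max; i++ {
		tickets <- struct{}{}
	}
	<-tickets                       // an execution claims a ticket
	fmt.Println(max - len(tickets)) // ActiveCount equivalent: 1
}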

52
vendor/github.com/afex/hystrix-go/hystrix/pool_metrics.go generated vendored Normal file

@@ -0,0 +1,52 @@
package hystrix
import (
"sync"
"github.com/afex/hystrix-go/hystrix/rolling"
)
type poolMetrics struct {
Mutex *sync.RWMutex
Updates chan poolMetricsUpdate
Name string
MaxActiveRequests *rolling.Number
Executed *rolling.Number
}
type poolMetricsUpdate struct {
activeCount int
}
func newPoolMetrics(name string) *poolMetrics {
m := &poolMetrics{}
m.Name = name
m.Updates = make(chan poolMetricsUpdate)
m.Mutex = &sync.RWMutex{}
m.Reset()
go m.Monitor()
return m
}
func (m *poolMetrics) Reset() {
m.Mutex.Lock()
defer m.Mutex.Unlock()
m.MaxActiveRequests = rolling.NewNumber()
m.Executed = rolling.NewNumber()
}
func (m *poolMetrics) Monitor() {
for u := range m.Updates {
m.Mutex.RLock()
m.Executed.Increment(1)
m.MaxActiveRequests.UpdateMax(float64(u.activeCount))
m.Mutex.RUnlock()
}
}

116
vendor/github.com/afex/hystrix-go/hystrix/rolling/rolling.go generated vendored Normal file

@@ -0,0 +1,116 @@
package rolling
import (
"sync"
"time"
)
// Number tracks a numberBucket over a bounded number of
// time buckets. Currently the buckets are one second long and only the last 10 seconds are kept.
type Number struct {
Buckets map[int64]*numberBucket
Mutex *sync.RWMutex
}
type numberBucket struct {
Value float64
}
// NewNumber initializes a Number struct.
func NewNumber() *Number {
r := &Number{
Buckets: make(map[int64]*numberBucket),
Mutex: &sync.RWMutex{},
}
return r
}
func (r *Number) getCurrentBucket() *numberBucket {
now := time.Now().Unix()
var bucket *numberBucket
var ok bool
if bucket, ok = r.Buckets[now]; !ok {
bucket = &numberBucket{}
r.Buckets[now] = bucket
}
return bucket
}
func (r *Number) removeOldBuckets() {
now := time.Now().Unix() - 10
for timestamp := range r.Buckets {
// TODO: configurable rolling window
if timestamp <= now {
delete(r.Buckets, timestamp)
}
}
}
// Increment increments the number in current timeBucket.
func (r *Number) Increment(i float64) {
if i == 0 {
return
}
r.Mutex.Lock()
defer r.Mutex.Unlock()
b := r.getCurrentBucket()
b.Value += i
r.removeOldBuckets()
}
// UpdateMax updates the maximum value in the current bucket.
func (r *Number) UpdateMax(n float64) {
r.Mutex.Lock()
defer r.Mutex.Unlock()
b := r.getCurrentBucket()
if n > b.Value {
b.Value = n
}
r.removeOldBuckets()
}
// Sum sums the values over the buckets in the last 10 seconds.
func (r *Number) Sum(now time.Time) float64 {
sum := float64(0)
r.Mutex.RLock()
defer r.Mutex.RUnlock()
for timestamp, bucket := range r.Buckets {
// TODO: configurable rolling window
if timestamp >= now.Unix()-10 {
sum += bucket.Value
}
}
return sum
}
// Max returns the maximum value seen in the last 10 seconds.
func (r *Number) Max(now time.Time) float64 {
var max float64
r.Mutex.RLock()
defer r.Mutex.RUnlock()
for timestamp, bucket := range r.Buckets {
// TODO: configurable rolling window
if timestamp >= now.Unix()-10 {
if bucket.Value > max {
max = bucket.Value
}
}
}
return max
}
func (r *Number) Avg(now time.Time) float64 {
return r.Sum(now) / 10
}
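A short sketch of the rolling Number above; the increments are arbitrary sample values.

package main

import (
	"fmt"
	"time"

	"github.com/afex/hystrix-go/hystrix/rolling"
)

func main() {
	n := rolling.NewNumber()
	n.Increment(3)
	n.Increment(2)
	// Sum only counts buckets whose timestamp falls in the last 10 seconds.
	fmt.Println(n.Sum(time.Now())) // 5
}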

148
vendor/github.com/afex/hystrix-go/hystrix/rolling/rolling_timing.go generated vendored Normal file

@@ -0,0 +1,148 @@
package rolling
import (
"math"
"sort"
"sync"
"time"
)
// Timing maintains time Durations for each time bucket.
// The Durations are kept in an array to allow for a variety of
// statistics to be calculated from the source data.
type Timing struct {
Buckets map[int64]*timingBucket
Mutex *sync.RWMutex
CachedSortedDurations []time.Duration
LastCachedTime int64
}
type timingBucket struct {
Durations []time.Duration
}
// NewTiming creates a Timing struct.
func NewTiming() *Timing {
r := &Timing{
Buckets: make(map[int64]*timingBucket),
Mutex: &sync.RWMutex{},
}
return r
}
type byDuration []time.Duration
func (c byDuration) Len() int { return len(c) }
func (c byDuration) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c byDuration) Less(i, j int) bool { return c[i] < c[j] }
// SortedDurations returns an array of time.Duration sorted from shortest
// to longest that have occurred in the last 60 seconds.
func (r *Timing) SortedDurations() []time.Duration {
r.Mutex.RLock()
t := r.LastCachedTime
r.Mutex.RUnlock()
if t+time.Duration(1*time.Second).Nanoseconds() > time.Now().UnixNano() {
// don't recalculate if current cache is still fresh
return r.CachedSortedDurations
}
var durations byDuration
now := time.Now()
r.Mutex.Lock()
defer r.Mutex.Unlock()
for timestamp, b := range r.Buckets {
// TODO: configurable rolling window
if timestamp >= now.Unix()-60 {
for _, d := range b.Durations {
durations = append(durations, d)
}
}
}
sort.Sort(durations)
r.CachedSortedDurations = durations
r.LastCachedTime = time.Now().UnixNano()
return r.CachedSortedDurations
}
func (r *Timing) getCurrentBucket() *timingBucket {
r.Mutex.RLock()
now := time.Now()
bucket, exists := r.Buckets[now.Unix()]
r.Mutex.RUnlock()
if !exists {
r.Mutex.Lock()
defer r.Mutex.Unlock()
r.Buckets[now.Unix()] = &timingBucket{}
bucket = r.Buckets[now.Unix()]
}
return bucket
}
func (r *Timing) removeOldBuckets() {
now := time.Now()
for timestamp := range r.Buckets {
// TODO: configurable rolling window
if timestamp <= now.Unix()-60 {
delete(r.Buckets, timestamp)
}
}
}
// Add appends the time.Duration given to the current time bucket.
func (r *Timing) Add(duration time.Duration) {
b := r.getCurrentBucket()
r.Mutex.Lock()
defer r.Mutex.Unlock()
b.Durations = append(b.Durations, duration)
r.removeOldBuckets()
}
// Percentile computes the given percentile using nearest-rank selection over the sorted durations.
func (r *Timing) Percentile(p float64) uint32 {
sortedDurations := r.SortedDurations()
length := len(sortedDurations)
if length <= 0 {
return 0
}
pos := r.ordinal(len(sortedDurations), p) - 1
return uint32(sortedDurations[pos].Nanoseconds() / 1000000)
}
func (r *Timing) ordinal(length int, percentile float64) int64 {
if percentile == 0 && length > 0 {
return 1
}
return int64(math.Ceil((percentile / float64(100)) * float64(length)))
}
// Mean computes the average timing in the last 60 seconds.
func (r *Timing) Mean() uint32 {
sortedDurations := r.SortedDurations()
var sum time.Duration
for _, d := range sortedDurations {
sum += d
}
length := int64(len(sortedDurations))
if length == 0 {
return 0
}
return uint32(sum.Nanoseconds()/length) / 1000000
}
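A short sketch of Timing above; with four samples, ordinal(4, 50) is 2, so the 50th percentile is the second shortest duration.

package main

import (
	"fmt"
	"time"

	"github.com/afex/hystrix-go/hystrix/rolling"
)

func main() {
	t := rolling.NewTiming()
	for _, ms := range []int{10, 20, 30, 40} {
		t.Add(time.Duration(ms) * time.Millisecond)
	}
	fmt.Println(t.Percentile(50)) // 20 (milliseconds)
	fmt.Println(t.Mean())         // 25
}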

124
vendor/github.com/afex/hystrix-go/hystrix/settings.go generated vendored Normal file

@@ -0,0 +1,124 @@
package hystrix
import (
"sync"
"time"
)
var (
// DefaultTimeout is how long to wait for command to complete, in milliseconds
DefaultTimeout = 1000
// DefaultMaxConcurrent is how many commands of the same type can run at the same time
DefaultMaxConcurrent = 10
// DefaultVolumeThreshold is the minimum number of requests needed before a circuit can be tripped due to health
DefaultVolumeThreshold = 20
// DefaultSleepWindow is how long, in milliseconds, to wait after a circuit opens before testing for recovery
DefaultSleepWindow = 5000
// DefaultErrorPercentThreshold causes circuits to open once the rolling measure of errors exceeds this percent of requests
DefaultErrorPercentThreshold = 50
// DefaultLogger is the default logger that will be used in the Hystrix package. By default prints nothing.
DefaultLogger = NoopLogger{}
)
type Settings struct {
Timeout time.Duration
MaxConcurrentRequests int
RequestVolumeThreshold uint64
SleepWindow time.Duration
ErrorPercentThreshold int
}
// CommandConfig is used to tune circuit settings at runtime
type CommandConfig struct {
Timeout int `json:"timeout"`
MaxConcurrentRequests int `json:"max_concurrent_requests"`
RequestVolumeThreshold int `json:"request_volume_threshold"`
SleepWindow int `json:"sleep_window"`
ErrorPercentThreshold int `json:"error_percent_threshold"`
}
var circuitSettings map[string]*Settings
var settingsMutex *sync.RWMutex
var log logger
func init() {
circuitSettings = make(map[string]*Settings)
settingsMutex = &sync.RWMutex{}
log = DefaultLogger
}
// Configure applies settings for a set of circuits
func Configure(cmds map[string]CommandConfig) {
for k, v := range cmds {
ConfigureCommand(k, v)
}
}
// ConfigureCommand applies settings for a circuit
func ConfigureCommand(name string, config CommandConfig) {
settingsMutex.Lock()
defer settingsMutex.Unlock()
timeout := DefaultTimeout
if config.Timeout != 0 {
timeout = config.Timeout
}
max := DefaultMaxConcurrent
if config.MaxConcurrentRequests != 0 {
max = config.MaxConcurrentRequests
}
volume := DefaultVolumeThreshold
if config.RequestVolumeThreshold != 0 {
volume = config.RequestVolumeThreshold
}
sleep := DefaultSleepWindow
if config.SleepWindow != 0 {
sleep = config.SleepWindow
}
errorPercent := DefaultErrorPercentThreshold
if config.ErrorPercentThreshold != 0 {
errorPercent = config.ErrorPercentThreshold
}
circuitSettings[name] = &Settings{
Timeout: time.Duration(timeout) * time.Millisecond,
MaxConcurrentRequests: max,
RequestVolumeThreshold: uint64(volume),
SleepWindow: time.Duration(sleep) * time.Millisecond,
ErrorPercentThreshold: errorPercent,
}
}
func getSettings(name string) *Settings {
settingsMutex.RLock()
s, exists := circuitSettings[name]
settingsMutex.RUnlock()
if !exists {
ConfigureCommand(name, CommandConfig{})
s = getSettings(name)
}
return s
}
func GetCircuitSettings() map[string]*Settings {
copy := make(map[string]*Settings)
settingsMutex.RLock()
for key, val := range circuitSettings {
copy[key] = val
}
settingsMutex.RUnlock()
return copy
}
// SetLogger configures the logger that will be used. This only applies to the hystrix package.
func SetLogger(l logger) {
log = l
}
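Finally, a sketch of how zero-valued CommandConfig fields fall back to the package defaults above; the command name is assumed.

package main

import (
	"fmt"

	"github.com/afex/hystrix-go/hystrix"
)

func main() {
	// Only Timeout is set; the other fields keep their defaults.
	hystrix.ConfigureCommand("my_command", hystrix.CommandConfig{
		Timeout: 2500, // milliseconds
	})
	s := hystrix.GetCircuitSettings()["my_command"]
	fmt.Println(s.Timeout)               // 2.5s
	fmt.Println(s.MaxConcurrentRequests) // 10 (DefaultMaxConcurrent)
}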