// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// Package retryablehttp provides a familiar HTTP client interface with
// automatic retries and exponential backoff. It is a thin wrapper over the
// standard net/http client library and exposes nearly the same public API.
// This makes retryablehttp very easy to drop into existing programs.
//
// retryablehttp performs automatic retries under certain conditions. Mainly, if
// an error is returned by the client (connection errors etc), or if a 500-range
// response is received, then a retry is invoked. Otherwise, the response is
// returned and left to the caller to interpret.
//
// Requests which take a request body should provide it via a non-nil
// parameter. The best choice is either a function satisfying ReaderFunc,
// which can produce a fresh io.Reader for every attempt, a *bytes.Buffer
// (the underlying raw byte slice will be used), or a raw byte slice. Because
// these are reference types that we wrap in readers as needed, the request
// body can be efficiently re-used without copying it. If a plain io.Reader
// (such as a *bytes.Reader) is provided, the full body will be read prior to
// the first request and will be efficiently re-used for any retries. An
// io.ReadSeeker can be used, but some users have observed occasional data
// races between the net/http library and the Seek functionality of some
// ReadSeeker implementations, so it should be avoided if possible.
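//
// A minimal usage sketch (the URL and retry count below are arbitrary
// placeholders; error handling is abbreviated):
//
//    client := retryablehttp.NewClient()
//    client.RetryMax = 10
//
//    resp, err := client.Get("https://example.com/")
//    if err != nil {
//        log.Fatal(err)
//    }
//    defer resp.Body.Close()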
package retryablehttp
import (
"bytes"
"context"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"math/rand"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
cleanhttp "github.com/hashicorp/go-cleanhttp"
)
var (
// Default retry configuration
defaultRetryWaitMin = 1 * time.Second
defaultRetryWaitMax = 30 * time.Second
defaultRetryMax = 4
// defaultLogger is the logger provided with defaultClient
defaultLogger = log.New(os.Stderr, "", log.LstdFlags)
// defaultClient is used for performing requests without explicitly making
// a new client. It is purposely private to avoid modifications.
defaultClient = NewClient()
// We need to consume response bodies to maintain http connections, but
// limit the size we consume to respReadLimit.
respReadLimit = int64(4096)
// A regular expression to match the error returned by net/http when the
// configured number of redirects is exhausted. This error isn't typed
// specifically so we resort to matching on the error string.
redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`)
// A regular expression to match the error returned by net/http when the
// scheme specified in the URL is invalid. This error isn't typed
// specifically so we resort to matching on the error string.
schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`)
// A regular expression to match the error returned by net/http when the
// TLS certificate is not trusted. This error isn't typed
// specifically so we resort to matching on the error string.
notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`)
)
// ReaderFunc is the type of function that can be given directly to NewRequest
// as the request body.
type ReaderFunc func() (io.Reader, error)
// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it.
// The ResponseHandlerFunc is called when the HTTP client successfully receives a response and the
// CheckRetry function indicates that a retry of the base request is not necessary.
// If an error is returned from this function, the CheckRetry policy will be used to determine
// whether to retry the whole request (including this handler).
//
// Make sure to check status codes! Even if the request was completed it may have a non-2xx status code.
//
// The response body is not automatically closed. It must be closed either by the ResponseHandlerFunc or
// by the caller out-of-band. Failure to do so will result in a memory leak.
type ResponseHandlerFunc func(*http.Response) error
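
// For illustration, given a *retryablehttp.Request named req, a sketch of a
// handler that turns unexpected status codes into errors so they flow back
// through CheckRetry; the strict 200-only check is just an example policy,
// and the body is intentionally left open for the caller to read and close:
//
//    req.SetResponseHandler(func(resp *http.Response) error {
//        if resp.StatusCode != http.StatusOK {
//            return fmt.Errorf("unexpected status: %s", resp.Status)
//        }
//        return nil
//    })
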
// LenReader is an interface implemented by many in-memory io.Readers. It is
// used for automatically sending the right Content-Length header when possible.
type LenReader interface {
Len() int
}
// Request wraps the metadata needed to create HTTP requests.
type Request struct {
// body is a seekable reader over the request body payload. This is
// used to rewind the request data in between retries.
body ReaderFunc
responseHandler ResponseHandlerFunc
// Embed an HTTP request directly. This makes a *Request act exactly
// like an *http.Request so that all meta methods are supported.
*http.Request
}
// WithContext returns a wrapped Request containing a shallow copy of the
// underlying *http.Request with its context changed to ctx. The provided ctx must be non-nil.
func (r *Request) WithContext(ctx context.Context) *Request {
return &Request{
body: r.body,
responseHandler: r.responseHandler,
Request: r.Request.WithContext(ctx),
}
}
// SetResponseHandler allows setting the response handler.
func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) {
r.responseHandler = fn
}
// BodyBytes allows accessing the request body. It is an analogue to
// http.Request's Body variable, but it returns a copy of the underlying data
// rather than consuming it.
//
// This function is not thread-safe; do not call it at the same time as another
// call, or at the same time this request is being used with Client.Do.
func (r *Request) BodyBytes() ([]byte, error) {
if r.body == nil {
return nil, nil
}
body, err := r.body()
if err != nil {
return nil, err
}
buf := new(bytes.Buffer)
_, err = buf.ReadFrom(body)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// SetBody allows setting the request body.
//
// It is useful if a new body needs to be set without constructing a new Request.
func (r *Request) SetBody(rawBody interface{}) error {
bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody)
if err != nil {
return err
}
r.body = bodyReader
r.ContentLength = contentLength
if bodyReader != nil {
r.GetBody = func() (io.ReadCloser, error) {
body, err := bodyReader()
if err != nil {
return nil, err
}
if rc, ok := body.(io.ReadCloser); ok {
return rc, nil
}
return io.NopCloser(body), nil
}
} else {
r.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil }
}
return nil
}
// WriteTo allows copying the request body into a writer.
//
// It writes data to w until there's no more data to write or
// when an error occurs. The return int64 value is the number of bytes
// written. Any error encountered during the write is also returned.
// The signature matches io.WriterTo interface.
func (r *Request) WriteTo(w io.Writer) (int64, error) {
body, err := r.body()
if err != nil {
return 0, err
}
if c, ok := body.(io.Closer); ok {
defer c.Close()
}
return io.Copy(w, body)
}
func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) {
var bodyReader ReaderFunc
var contentLength int64
switch body := rawBody.(type) {
// If they gave us a function already, great! Use it.
case ReaderFunc:
bodyReader = body
tmp, err := body()
if err != nil {
return nil, 0, err
}
if lr, ok := tmp.(LenReader); ok {
contentLength = int64(lr.Len())
}
if c, ok := tmp.(io.Closer); ok {
c.Close()
}
case func() (io.Reader, error):
bodyReader = body
tmp, err := body()
if err != nil {
return nil, 0, err
}
if lr, ok := tmp.(LenReader); ok {
contentLength = int64(lr.Len())
}
if c, ok := tmp.(io.Closer); ok {
c.Close()
}
// If a regular byte slice, we can read it over and over via new
// readers
case []byte:
buf := body
bodyReader = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
// If a bytes.Buffer we can read the underlying byte slice over and
// over
case *bytes.Buffer:
buf := body
bodyReader = func() (io.Reader, error) {
return bytes.NewReader(buf.Bytes()), nil
}
contentLength = int64(buf.Len())
// We handle *bytes.Reader before the io.ReadSeeker case because we don't
// want to rely on its seeking behavior; reading it once up front is
// simpler.
case *bytes.Reader:
buf, err := ioutil.ReadAll(body)
if err != nil {
return nil, 0, err
}
bodyReader = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
// Compat case
case io.ReadSeeker:
raw := body
bodyReader = func() (io.Reader, error) {
_, err := raw.Seek(0, 0)
return ioutil.NopCloser(raw), err
}
if lr, ok := raw.(LenReader); ok {
contentLength = int64(lr.Len())
}
// Read all in so we can reset
case io.Reader:
buf, err := ioutil.ReadAll(body)
if err != nil {
return nil, 0, err
}
if len(buf) == 0 {
bodyReader = func() (io.Reader, error) {
return http.NoBody, nil
}
contentLength = 0
} else {
bodyReader = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
}
// No body provided, nothing to do
case nil:
// Unrecognized type
default:
return nil, 0, fmt.Errorf("cannot handle type %T", rawBody)
}
return bodyReader, contentLength, nil
}
// FromRequest wraps an http.Request in a retryablehttp.Request
func FromRequest(r *http.Request) (*Request, error) {
bodyReader, _, err := getBodyReaderAndContentLength(r.Body)
if err != nil {
return nil, err
}
// Could assert contentLength == r.ContentLength
return &Request{body: bodyReader, Request: r}, nil
}
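
// For illustration, wrapping an *http.Request built elsewhere so it can be
// retried (the URL is a placeholder):
//
//    stdReq, err := http.NewRequest("GET", "https://example.com/", nil)
//    if err != nil {
//        return err
//    }
//    req, err := retryablehttp.FromRequest(stdReq)
//    if err != nil {
//        return err
//    }
//    resp, err := retryablehttp.NewClient().Do(req)
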
// NewRequest creates a new wrapped request.
func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
return NewRequestWithContext(context.Background(), method, url, rawBody)
}
// NewRequestWithContext creates a new wrapped request with the provided context.
//
// The context controls the entire lifetime of a request and its response:
// obtaining a connection, sending the request, and reading the response headers and body.
func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) {
httpReq, err := http.NewRequestWithContext(ctx, method, url, nil)
if err != nil {
return nil, err
}
req := &Request{
Request: httpReq,
}
if err := req.SetBody(rawBody); err != nil {
return nil, err
}
return req, nil
}
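
// For illustration, bounding all attempts of a request with a deadline (the
// 30-second timeout and URL are arbitrary placeholders):
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//
//    req, err := retryablehttp.NewRequestWithContext(ctx, "GET", "https://example.com/", nil)
//    if err != nil {
//        return err
//    }
//    resp, err := retryablehttp.NewClient().Do(req)
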
// Logger is an interface that allows loggers other than the standard
// log.Logger to be used.
type Logger interface {
Printf(string, ...interface{})
}
// LeveledLogger is an interface that can be implemented by any logger or a
// logger wrapper to provide leveled logging. The methods accept a message
// string and a variadic number of key-value pairs. For log.Printf style
// formatting where message string contains a format specifier, use Logger
// interface.
type LeveledLogger interface {
Error(msg string, keysAndValues ...interface{})
Info(msg string, keysAndValues ...interface{})
Debug(msg string, keysAndValues ...interface{})
Warn(msg string, keysAndValues ...interface{})
}
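
// For illustration, a hypothetical LeveledLogger adapter (stdLeveled) over the
// standard library logger; a real implementation would usually come from a
// structured logging package:
//
//    type stdLeveled struct{ l *log.Logger }
//
//    func (s stdLeveled) Error(msg string, kv ...interface{}) { s.l.Println("ERROR", msg, kv) }
//    func (s stdLeveled) Warn(msg string, kv ...interface{})  { s.l.Println("WARN", msg, kv) }
//    func (s stdLeveled) Info(msg string, kv ...interface{})  { s.l.Println("INFO", msg, kv) }
//    func (s stdLeveled) Debug(msg string, kv ...interface{}) { s.l.Println("DEBUG", msg, kv) }
//
//    client := retryablehttp.NewClient()
//    client.Logger = stdLeveled{l: log.New(os.Stderr, "", log.LstdFlags)}
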
// hookLogger adapts a LeveledLogger to Logger for use by the existing hook functions
// without changing the API.
type hookLogger struct {
LeveledLogger
}
func (h hookLogger) Printf(s string, args ...interface{}) {
h.Info(fmt.Sprintf(s, args...))
}
// RequestLogHook allows a function to run before each retry. The HTTP
// request which will be made, and the retry number (0 for the initial
// request) are available to users. The internal logger is exposed to
// consumers.
type RequestLogHook func(Logger, *http.Request, int)
// ResponseLogHook is like RequestLogHook, but allows running a function
// on each HTTP response. This function will be invoked at the end of
// every HTTP request executed, regardless of whether a subsequent retry
// needs to be performed or not. If the response body is read or closed
// from this method, this will affect the response returned from Do().
type ResponseLogHook func(Logger, *http.Response)
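
// For illustration, hooks that record every attempt and every response; the
// log messages are arbitrary, and the nil check matters because the hooks
// receive a nil Logger when Client.Logger is unset:
//
//    client := retryablehttp.NewClient()
//    client.RequestLogHook = func(l retryablehttp.Logger, req *http.Request, attempt int) {
//        if l != nil {
//            l.Printf("attempt %d: %s %s", attempt, req.Method, req.URL)
//        }
//    }
//    client.ResponseLogHook = func(l retryablehttp.Logger, resp *http.Response) {
//        if l != nil {
//            l.Printf("got status %d", resp.StatusCode)
//        }
//    }
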
// CheckRetry specifies a policy for handling retries. It is called
// following each request with the response and error values returned by
// the http.Client. If CheckRetry returns false, the Client stops retrying
// and returns the response to the caller. If CheckRetry returns an error,
// that error value is returned in lieu of the error from the request. The
// Client will close any response body when retrying, but if the retry is
// aborted it is up to the CheckRetry callback to properly close any
// response body before returning.
type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error)
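
// For illustration, a custom policy that never retries 4xx responses
// (including 429, which DefaultRetryPolicy would retry) and otherwise defers
// to the default policy:
//
//    client := retryablehttp.NewClient()
//    client.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) {
//        if resp != nil && resp.StatusCode >= 400 && resp.StatusCode < 500 {
//            return false, nil
//        }
//        return retryablehttp.DefaultRetryPolicy(ctx, resp, err)
//    }
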
// Backoff specifies a policy for how long to wait between retries.
// It is called after a failing request to determine the amount of time
// that should pass before trying again.
type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
// ErrorHandler is called when retries are exhausted, receiving the last
// response and error from the http library. If not specified, the default
// behavior for the library is to close the body and return an error indicating
// how many tries were attempted. If overriding this, be sure to close the body
// if needed.
type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error)
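
// For illustration, a handler that returns the final response along with a
// wrapped error instead of discarding it (PassthroughErrorHandler below is a
// simpler alternative); the message text is arbitrary:
//
//    client := retryablehttp.NewClient()
//    client.ErrorHandler = func(resp *http.Response, err error, numTries int) (*http.Response, error) {
//        if err != nil {
//            return resp, fmt.Errorf("request failed after %d attempt(s): %w", numTries, err)
//        }
//        return resp, fmt.Errorf("request failed after %d attempt(s)", numTries)
//    }
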
// Client is used to make HTTP requests. It adds additional functionality
// like automatic retries to tolerate minor outages.
type Client struct {
HTTPClient *http.Client // Internal HTTP client.
Logger interface{} // Custom logger instance. Can be either Logger or LeveledLogger
RetryWaitMin time.Duration // Minimum time to wait
RetryWaitMax time.Duration // Maximum time to wait
RetryMax int // Maximum number of retries
// RequestLogHook allows a user-supplied function to be called
// before each retry.
RequestLogHook RequestLogHook
// ResponseLogHook allows a user-supplied function to be called
// with the response from each HTTP request executed.
ResponseLogHook ResponseLogHook
// CheckRetry specifies the policy for handling retries, and is called
// after each request. The default policy is DefaultRetryPolicy.
CheckRetry CheckRetry
// Backoff specifies the policy for how long to wait between retries
Backoff Backoff
// ErrorHandler specifies the custom error handler to use, if any
ErrorHandler ErrorHandler
loggerInit sync.Once
clientInit sync.Once
}
// NewClient creates a new Client with default settings.
func NewClient() *Client {
return &Client{
HTTPClient: cleanhttp.DefaultPooledClient(),
Logger: defaultLogger,
RetryWaitMin: defaultRetryWaitMin,
RetryWaitMax: defaultRetryWaitMax,
RetryMax: defaultRetryMax,
CheckRetry: DefaultRetryPolicy,
Backoff: DefaultBackoff,
}
}
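
// For illustration, overriding the defaults above; the values are arbitrary:
//
//    client := retryablehttp.NewClient()
//    client.RetryMax = 10
//    client.RetryWaitMin = 200 * time.Millisecond
//    client.RetryWaitMax = 10 * time.Second
//    client.Backoff = retryablehttp.LinearJitterBackoff
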
func (c *Client) logger() interface{} {
c.loggerInit.Do(func() {
if c.Logger == nil {
return
}
switch c.Logger.(type) {
case Logger, LeveledLogger:
// ok
default:
// This should only surface in development, when wiring up the Logger, not in production.
panic(fmt.Sprintf("invalid logger type passed, must be Logger or LeveledLogger, was %T", c.Logger))
}
})
return c.Logger
}
// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
// will retry on connection errors and server errors.
func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
// do not retry on context.Canceled or context.DeadlineExceeded
if ctx.Err() != nil {
return false, ctx.Err()
}
// don't propagate other errors
shouldRetry, _ := baseRetryPolicy(resp, err)
return shouldRetry, nil
}
// ErrorPropagatedRetryPolicy is the same as DefaultRetryPolicy, except it
// propagates errors back instead of returning nil. This allows you to inspect
// why it decided to retry or not.
func ErrorPropagatedRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
// do not retry on context.Canceled or context.DeadlineExceeded
if ctx.Err() != nil {
return false, ctx.Err()
}
return baseRetryPolicy(resp, err)
}
func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
if err != nil {
if v, ok := err.(*url.Error); ok {
// Don't retry if the error was due to too many redirects.
if redirectsErrorRe.MatchString(v.Error()) {
return false, v
}
// Don't retry if the error was due to an invalid protocol scheme.
if schemeErrorRe.MatchString(v.Error()) {
return false, v
}
// Don't retry if the error was due to TLS cert verification failure.
if notTrustedErrorRe.MatchString(v.Error()) {
return false, v
}
if _, ok := v.Err.(x509.UnknownAuthorityError); ok {
return false, v
}
}
// The error is likely recoverable so retry.
return true, nil
}
// 429 Too Many Requests is recoverable. Sometimes the server includes
// a Retry-After response header to indicate when it will be able to
// start processing requests from this client again.
if resp.StatusCode == http.StatusTooManyRequests {
return true, nil
}
// Check the response code. We retry on 500-range responses to allow
// the server time to recover, as 500's are typically not permanent
// errors and may relate to outages on the server side. This will catch
// invalid response codes as well, like 0 and 999.
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) {
return true, fmt.Errorf("unexpected HTTP status %s", resp.Status)
}
return false, nil
}
// DefaultBackoff provides a default callback for Client.Backoff which
// will perform exponential backoff based on the attempt number and limited
// by the provided minimum and maximum durations.
//
// It also tries to parse the Retry-After response header when a 429
// (Too Many Requests) or 503 (Service Unavailable) status is found in the
// resp parameter, and in that case returns the number of seconds the server
// says to wait before sending more requests from this client.
func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
if resp != nil {
if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable {
if s, ok := resp.Header["Retry-After"]; ok {
if sleep, err := strconv.ParseInt(s[0], 10, 64); err == nil {
return time.Second * time.Duration(sleep)
}
}
}
}
mult := math.Pow(2, float64(attemptNum)) * float64(min)
sleep := time.Duration(mult)
if float64(sleep) != mult || sleep > max {
sleep = max
}
return sleep
}
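
// As a worked example with the default settings (RetryWaitMin = 1s,
// RetryWaitMax = 30s), DefaultBackoff sleeps 1s, 2s, 4s, 8s, 16s for attempts
// 0 through 4, then stays capped at 30s once 2^attemptNum * min exceeds the
// maximum.
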
// LinearJitterBackoff provides a callback for Client.Backoff which will
// perform linear backoff based on the attempt number and with jitter to
// prevent a thundering herd.
//
// min and max here are *not* absolute values. The number to be multiplied by
// the attempt number will be chosen at random from between them, thus they are
// bounding the jitter.
//
// For instance:
// * To get strictly linear backoff of one second increasing each retry, set
// both to one second (1s, 2s, 3s, 4s, ...)
// * To get a small amount of jitter centered around one second increasing each
// retry, set to around one second, such as a min of 800ms and max of 1200ms
// (892ms, 2102ms, 2945ms, 4312ms, ...)
// * To get extreme jitter, set to a very wide spread, such as a min of 100ms
// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...)
func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
// attemptNum always starts at zero but we want to start at 1 for multiplication
attemptNum++
if max <= min {
// The range is invalid (or min and max are equal), so fall back to
// min * attemptNum.
return min * time.Duration(attemptNum)
}
// Seed rand; doing this every time is fine
rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
// Pick a random number that lies somewhere between the min and max and
// multiply by the attemptNum. attemptNum starts at zero so we always
// increment here. We first get a random percentage, then apply that to the
// difference between min and max, and add to min.
jitter := rand.Float64() * float64(max-min)
jitterMin := int64(jitter) + int64(min)
return time.Duration(jitterMin * int64(attemptNum))
}
// PassthroughErrorHandler is an ErrorHandler that directly passes through the
// values from the net/http library for the final request. The body is not
// closed.
func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) {
return resp, err
}
// Do wraps calling an HTTP method with retries.
func (c *Client) Do(req *Request) (*http.Response, error) {
c.clientInit.Do(func() {
if c.HTTPClient == nil {
c.HTTPClient = cleanhttp.DefaultPooledClient()
}
})
logger := c.logger()
if logger != nil {
switch v := logger.(type) {
case LeveledLogger:
v.Debug("performing request", "method", req.Method, "url", req.URL)
case Logger:
v.Printf("[DEBUG] %s %s", req.Method, req.URL)
}
}
var resp *http.Response
var attempt int
var shouldRetry bool
var doErr, respErr, checkErr error
for i := 0; ; i++ {
doErr, respErr = nil, nil
attempt++
// Always rewind the request body when non-nil.
if req.body != nil {
body, err := req.body()
if err != nil {
c.HTTPClient.CloseIdleConnections()
return resp, err
}
if c, ok := body.(io.ReadCloser); ok {
req.Body = c
} else {
req.Body = ioutil.NopCloser(body)
}
}
if c.RequestLogHook != nil {
switch v := logger.(type) {
case LeveledLogger:
c.RequestLogHook(hookLogger{v}, req.Request, i)
case Logger:
c.RequestLogHook(v, req.Request, i)
default:
c.RequestLogHook(nil, req.Request, i)
}
}
// Attempt the request
resp, doErr = c.HTTPClient.Do(req.Request)
// Check if we should continue with retries.
shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr)
if !shouldRetry && doErr == nil && req.responseHandler != nil {
respErr = req.responseHandler(resp)
shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr)
}
err := doErr
if respErr != nil {
err = respErr
}
if err != nil {
switch v := logger.(type) {
case LeveledLogger:
v.Error("request failed", "error", err, "method", req.Method, "url", req.URL)
case Logger:
v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
}
} else {
// Call this here to maintain the behavior of logging all requests,
// even if CheckRetry signals to stop.
if c.ResponseLogHook != nil {
// Call the response logger function if provided.
switch v := logger.(type) {
case LeveledLogger:
c.ResponseLogHook(hookLogger{v}, resp)
case Logger:
c.ResponseLogHook(v, resp)
default:
c.ResponseLogHook(nil, resp)
}
}
}
if !shouldRetry {
break
}
// We do this before drainBody because there's no need for the I/O if
// we're breaking out
remain := c.RetryMax - i
if remain <= 0 {
break
}
// We're going to retry, consume any response to reuse the connection.
if doErr == nil {
c.drainBody(resp.Body)
}
wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
if logger != nil {
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
if resp != nil {
desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode)
}
switch v := logger.(type) {
case LeveledLogger:
v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain)
case Logger:
v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
}
}
timer := time.NewTimer(wait)
select {
case <-req.Context().Done():
timer.Stop()
c.HTTPClient.CloseIdleConnections()
return nil, req.Context().Err()
case <-timer.C:
}
// Make shallow copy of http Request so that we can modify its body
// without racing against the closeBody call in persistConn.writeLoop.
httpreq := *req.Request
req.Request = &httpreq
}
// this is the closest we have to success criteria
if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry {
return resp, nil
}
defer c.HTTPClient.CloseIdleConnections()
var err error
if checkErr != nil {
err = checkErr
} else if respErr != nil {
err = respErr
} else {
err = doErr
}
if c.ErrorHandler != nil {
return c.ErrorHandler(resp, err, attempt)
}
// By default, we close the response body and return an error without
// returning the response
if resp != nil {
c.drainBody(resp.Body)
}
// this means CheckRetry thought the request was a failure, but didn't
// communicate why
if err == nil {
return nil, fmt.Errorf("%s %s giving up after %d attempt(s)",
req.Method, req.URL, attempt)
}
return nil, fmt.Errorf("%s %s giving up after %d attempt(s): %w",
req.Method, req.URL, attempt, err)
}
// Try to read the response body so we can reuse this connection.
func (c *Client) drainBody(body io.ReadCloser) {
defer body.Close()
_, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit))
if err != nil {
if c.logger() != nil {
switch v := c.logger().(type) {
case LeveledLogger:
v.Error("error reading response body", "error", err)
case Logger:
v.Printf("[ERR] error reading response body: %v", err)
}
}
}
}
// Get is a shortcut for doing a GET request without making a new client.
func Get(url string) (*http.Response, error) {
return defaultClient.Get(url)
}
// Get is a convenience helper for doing simple GET requests.
func (c *Client) Get(url string) (*http.Response, error) {
req, err := NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
return c.Do(req)
}
// Head is a shortcut for doing a HEAD request without making a new client.
func Head(url string) (*http.Response, error) {
return defaultClient.Head(url)
}
// Head is a convenience method for doing simple HEAD requests.
func (c *Client) Head(url string) (*http.Response, error) {
req, err := NewRequest("HEAD", url, nil)
if err != nil {
return nil, err
}
return c.Do(req)
}
// Post is a shortcut for doing a POST request without making a new client.
func Post(url, bodyType string, body interface{}) (*http.Response, error) {
return defaultClient.Post(url, bodyType, body)
}
// Post is a convenience method for doing simple POST requests.
func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) {
req, err := NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", bodyType)
return c.Do(req)
}
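
// For illustration, posting a JSON payload; the URL and body are placeholders,
// and []byte is one of the body types accepted for the body parameter:
//
//    client := retryablehttp.NewClient()
//    payload := []byte(`{"name":"example"}`)
//    resp, err := client.Post("https://example.com/items", "application/json", payload)
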
// PostForm is a shortcut to perform a POST with form data without creating
// a new client.
func PostForm(url string, data url.Values) (*http.Response, error) {
return defaultClient.PostForm(url, data)
}
// PostForm is a convenience method for doing simple POST operations using
// pre-filled url.Values form data.
func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) {
return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
// StandardClient returns a stdlib *http.Client with a custom Transport, which
// shims in a *retryablehttp.Client for added retries.
func (c *Client) StandardClient() *http.Client {
return &http.Client{
Transport: &RoundTripper{Client: c},
}
}
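
// For illustration, handing the retrying client to code that expects a plain
// *http.Client; the retry count and URL are arbitrary:
//
//    retryClient := retryablehttp.NewClient()
//    retryClient.RetryMax = 5
//
//    httpClient := retryClient.StandardClient() // *http.Client whose Transport retries
//    resp, err := httpClient.Get("https://example.com/")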