Skip to content

Commit

Permalink
Protect against data race when ReverseProxy modifies response headers
Browse files Browse the repository at this point in the history
See issue: golang/go#65123

ReverseProxy configures a Got1xxResponse trace hook. We configure
ReverseProxy with our ProxyRoundTripper, and ProxyRoundTripper
eventually calls http.Transport.

http.Transport runs readLoop for each connection in a separate
goroutine. When RoundTrip is called, readLoop runs the Got1xxResponse
hook.

If there are no errors during request handling, RoundTrip waits for
readLoop to finish. If there is an error, though, RoundTrip exits early
and does not wait for readLoop to finish. This leaves the readLoop
goroutine running in parallel, causing a data race in our ErrorHandler,
which modifies response headers at the same time as the Got1xxResponse
hook.

This race manifests as a fatal "concurrent map writes" error rather
than a panic, so it is not caught by the panic handler, making Gorouter
fail and drop all connections.

This code can be removed once the ReverseProxy issue is fixed.
  • Loading branch information
mariash authored and dsabeti committed Jan 23, 2024
1 parent b2d621c commit 31a2bec
Show file tree
Hide file tree
Showing 2 changed files with 69 additions and 0 deletions.
19 changes: 19 additions & 0 deletions proxy/round_tripper/proxy_round_tripper.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,10 @@ import (
"io"
"io/ioutil"
"net/http"
"net/http/httptrace"
"net/textproto"
"net/url"
"sync"
"time"

"github.com/uber-go/zap"
Expand Down Expand Up @@ -99,6 +102,17 @@ func (rt *roundTripper) RoundTrip(originalRequest *http.Request) (*http.Response

request := originalRequest.Clone(originalRequest.Context())
request, trace := traceRequest(request)
responseWriterMu := &sync.Mutex{}
requestClientTrace := httptrace.ContextClientTrace(request.Context())
originalGot1xxResponse := requestClientTrace.Got1xxResponse
requestClientTrace.Got1xxResponse = func(code int, header textproto.MIMEHeader) error {
if originalGot1xxResponse == nil {
return nil
}
responseWriterMu.Lock()
defer responseWriterMu.Unlock()
return originalGot1xxResponse(code, header)
}

if request.Body != nil {
// Temporarily disable closing of the body while in the RoundTrip function, since
Expand Down Expand Up @@ -266,6 +280,11 @@ func (rt *roundTripper) RoundTrip(originalRequest *http.Request) (*http.Response
}

if err != nil {
// When roundtrip returns an error, transport readLoop might still be running.
// Protect access to response headers map which can be handled in Got1xxResponse hook in readLoop
// See an issue https://github.com/golang/go/issues/65123
responseWriterMu.Lock()
defer responseWriterMu.Unlock()
rt.errorHandler.HandleError(reqInfo.ProxyResponseWriter, err)
return nil, err
}
Expand Down
50 changes: 50 additions & 0 deletions proxy/round_tripper/proxy_round_tripper_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ import (
"net"
"net/http"
"net/http/httptest"
"net/http/httptrace"
"net/textproto"
"net/url"
"strings"
"sync"
Expand Down Expand Up @@ -436,6 +438,54 @@ var _ = Describe("ProxyRoundTripper", func() {
})
})

Context("when backend writes 1xx response but fails eventually", func() {
var done chan struct{}
// Regression test for a data race in ReverseProxy: when RoundTrip
// returns an error, the transport's readLoop goroutine may still be
// running the Got1xxResponse hook, which writes to the response
// headers map concurrently with the ErrorHandler.
// See https://github.com/golang/go/issues/65123
BeforeEach(func() {
done = make(chan struct{})
trace := &httptrace.ClientTrace{
Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
// Repeatedly write to the response headers so that, if this
// hook runs unsynchronized with the error handler, the race
// detector (or a concurrent-map-write crash) reliably fires.
for i := 0; i < 10000000; i++ {
resp.Header().Set("X-Something", "Hello")
}
return nil
},
}
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
transport.RoundTripStub = func(req *http.Request) (*http.Response, error) {
// Emulate the transport's readLoop: it keeps running after
// RoundTrip has already returned an error, and invokes the
// Got1xxResponse hook while the error path is executing.
go func() {
defer close(done)
// emulating readLoop running after the RoundTrip and modifying response headers
trace := httptrace.ContextClientTrace(req.Context())
if trace != nil && trace.Got1xxResponse != nil {
trace.Got1xxResponse(http.StatusContinue, textproto.MIMEHeader{})
}
}()
return nil, errors.New("failed-roundtrip")
}
})

JustBeforeEach(func() {
// Use the real ErrorHandler (not a fake) because the race under test
// is between the ErrorHandler's header writes and the trace hook.
realErrorHandler := &round_tripper.ErrorHandler{MetricReporter: combinedReporter}
proxyRoundTripper = round_tripper.NewProxyRoundTripper(
roundTripperFactory,
retriableClassifier,
logger,
combinedReporter,
realErrorHandler,
routeServicesTransport,
cfg,
)
})

It("handles error without data race", func() {
_, err := proxyRoundTripper.RoundTrip(req)
Expect(err).To(HaveOccurred())
// Wait for the simulated readLoop goroutine to finish so the race
// window has actually been exercised before the test ends.
Eventually(done).Should(BeClosed())
})
})

DescribeTable("when the backend fails with an empty response error (io.EOF)",
func(reqBody io.ReadCloser, getBodyIsNil bool, reqMethod string, headers map[string]string, classify fails.ClassifierFunc, expectRetry bool) {
badResponse := &http.Response{
Expand Down

0 comments on commit 31a2bec

Please sign in to comment.