[conn_pool] fix use after free in H/1 connection pool (#14220)
Fixes a use-after-free that occurs when the dispatcher runs conn_pool->onUpstreamReady() after the connection pool has been destroyed. Reverts to a schedulable callback owned by the pool (see https://github.com/envoyproxy/envoy/pull/13867/files), so a pending callback cannot outlive the pool.

Signed-off-by: Asra Ali <[email protected]>
asraa authored Dec 8, 2020
1 parent fa80297 commit 657f3c1
Showing 9 changed files with 140 additions and 22 deletions.
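
The core of the change visible in the diffs below is an ownership swap: instead of posting a lambda that captures a raw this pointer to the dispatcher, the pool now owns an Event::SchedulableCallbackPtr, so destroying the pool also destroys any pending onUpstreamReady() invocation. The standalone C++ sketch below illustrates the difference; Dispatcher, ScheduledHandle, and ConnPool here are simplified stand-ins for illustration only, not Envoy's real Event::Dispatcher, Event::SchedulableCallback, or ConnPoolImplBase.

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

// Minimal stand-in for an event loop. Posted lambdas always run on the next
// iteration; scheduled handles run only while their owner is still alive.
class Dispatcher {
public:
  class ScheduledHandle {
  public:
    explicit ScheduledHandle(std::function<void()> cb) : cb_(std::move(cb)) {}
    void scheduleCallbackCurrentIteration() { scheduled_ = true; }
    void runIfScheduled() {
      if (scheduled_) {
        scheduled_ = false;
        cb_();
      }
    }

  private:
    std::function<void()> cb_;
    bool scheduled_{false};
  };

  void post(std::function<void()> cb) { posted_.push_back(std::move(cb)); }

  std::shared_ptr<ScheduledHandle> createSchedulableCallback(std::function<void()> cb) {
    auto handle = std::make_shared<ScheduledHandle>(std::move(cb));
    handles_.push_back(handle); // weak reference: destroying the owner cancels it
    return handle;
  }

  void runIteration() {
    for (auto& cb : posted_) {
      cb(); // runs even if the object captured by the lambda is already gone
    }
    posted_.clear();
    for (auto& weak : handles_) {
      if (auto handle = weak.lock()) {
        handle->runIfScheduled(); // skipped once the owning pool has been destroyed
      }
    }
  }

private:
  std::vector<std::function<void()>> posted_;
  std::vector<std::weak_ptr<ScheduledHandle>> handles_;
};

class ConnPool {
public:
  explicit ConnPool(Dispatcher& dispatcher)
      : dispatcher_(dispatcher),
        upstream_ready_cb_(dispatcher.createSchedulableCallback([this]() { onUpstreamReady(); })) {}

  // Old pattern: the lambda captures a raw this pointer, which dangles if the
  // pool is destroyed before the dispatcher drains its queue.
  void scheduleViaPost() {
    dispatcher_.post([this]() { onUpstreamReady(); });
  }

  // New pattern: scheduling goes through a callback the pool owns, so deleting
  // the pool also deletes (and thereby cancels) any pending invocation.
  void scheduleViaOwnedCallback() { upstream_ready_cb_->scheduleCallbackCurrentIteration(); }

  void onUpstreamReady() { std::cout << "onUpstreamReady on pool " << this << "\n"; }

private:
  Dispatcher& dispatcher_;
  std::shared_ptr<Dispatcher::ScheduledHandle> upstream_ready_cb_;
};

int main() {
  Dispatcher dispatcher;
  auto pool = std::make_unique<ConnPool>(dispatcher);
  pool->scheduleViaOwnedCallback();
  pool.reset();              // the pool is destroyed before the iteration finishes
  dispatcher.runIteration(); // the owned callback was cancelled, so nothing dangles
  return 0;
}

If scheduleViaPost() were used in place of scheduleViaOwnedCallback() above, runIteration() would invoke onUpstreamReady() on the deleted pool; that is the crash this commit prevents and the scenario the new Http1ConnPoolDestructImplTest.CbAfterConnPoolDestroyed test exercises.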
7 changes: 6 additions & 1 deletion source/common/conn_pool/conn_pool_base.cc
@@ -15,7 +15,8 @@ ConnPoolImplBase::ConnPoolImplBase(
const Network::TransportSocketOptionsSharedPtr& transport_socket_options,
Upstream::ClusterConnectivityState& state)
: state_(state), host_(host), priority_(priority), dispatcher_(dispatcher),
socket_options_(options), transport_socket_options_(transport_socket_options) {}
socket_options_(options), transport_socket_options_(transport_socket_options),
upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() { onUpstreamReady(); })) {}

ConnPoolImplBase::~ConnPoolImplBase() {
ASSERT(ready_clients_.empty());
@@ -214,6 +215,10 @@ bool ConnPoolImplBase::maybePrefetch(float global_prefetch_ratio) {
return tryCreateNewConnection(global_prefetch_ratio);
}

void ConnPoolImplBase::scheduleOnUpstreamReady() {
upstream_ready_cb_->scheduleCallbackCurrentIteration();
}

void ConnPoolImplBase::onUpstreamReady() {
while (!pending_streams_.empty() && !ready_clients_.empty()) {
ActiveClientPtr& client = ready_clients_.front();
5 changes: 4 additions & 1 deletion source/common/conn_pool/conn_pool_base.h
@@ -154,7 +154,7 @@ class ConnPoolImplBase : protected Logger::Loggable<Logger::Id::pool> {
Network::ConnectionEvent event);
// See if the drain process has started and/or completed.
void checkForDrained();
void onUpstreamReady();
void scheduleOnUpstreamReady();
ConnectionPool::Cancellable* newStream(AttachContext& context);
// Called if this pool is likely to be picked soon, to determine if it's worth
// prefetching a connection.
@@ -250,6 +250,9 @@ class ConnPoolImplBase : protected Logger::Loggable<Logger::Id::pool> {
// The number of streams that can be immediately dispatched
// if all CONNECTING connections become connected.
uint32_t connecting_stream_capacity_{0};

void onUpstreamReady();
Event::SchedulableCallbackPtr upstream_ready_cb_;
};

} // namespace ConnectionPool
2 changes: 1 addition & 1 deletion source/common/http/http1/conn_pool.cc
@@ -76,7 +76,7 @@ void ActiveClient::StreamWrapper::onDecodeComplete() {
parent_.codec_client_->close();
} else {
auto* pool = &parent_.parent();
pool->dispatcher().post([pool]() -> void { pool->onUpstreamReady(); });
pool->scheduleOnUpstreamReady();
parent_.stream_wrapper_.reset();

pool->checkForDrained();
2 changes: 1 addition & 1 deletion source/common/tcp/conn_pool.cc
@@ -49,7 +49,7 @@ ActiveTcpClient::~ActiveTcpClient() {
void ActiveTcpClient::clearCallbacks() {
if (state_ == Envoy::ConnectionPool::ActiveClient::State::BUSY && parent_.hasPendingStreams()) {
auto* pool = &parent_;
pool->dispatcher().post([pool]() -> void { pool->onUpstreamReady(); });
pool->scheduleOnUpstreamReady();
}
callbacks_ = nullptr;
tcp_connection_data_ = nullptr;
4 changes: 3 additions & 1 deletion test/common/conn_pool/conn_pool_base_test.cc
@@ -52,7 +52,8 @@ class TestConnPoolImplBase : public ConnPoolImplBase {
class ConnPoolImplBaseTest : public testing::Test {
public:
ConnPoolImplBaseTest()
: pool_(host_, Upstream::ResourcePriority::Default, dispatcher_, nullptr, nullptr, state_) {
: upstream_ready_cb_(new NiceMock<Event::MockSchedulableCallback>(&dispatcher_)),
pool_(host_, Upstream::ResourcePriority::Default, dispatcher_, nullptr, nullptr, state_) {
// Default connections to 1024 because the tests shouldn't be relying on the
// connection resource limit for most tests.
cluster_->resetResourceManager(1024, 1024, 1024, 1, 1);
@@ -80,6 +81,7 @@ class ConnPoolImplBaseTest : public testing::Test {
new NiceMock<Upstream::MockHostDescription>()};
std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};
NiceMock<Event::MockDispatcher> dispatcher_;
NiceMock<Event::MockSchedulableCallback>* upstream_ready_cb_;
Upstream::HostSharedPtr host_{
Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80", dispatcher_.timeSource())};
TestConnPoolImplBase pool_;
120 changes: 113 additions & 7 deletions test/common/http/http1/conn_pool_test.cc
@@ -52,7 +52,8 @@ class ConnPoolImplForTest : public Event::TestUsingSimulatedTime, public FixedHt
public:
ConnPoolImplForTest(Event::MockDispatcher& dispatcher,
Upstream::ClusterInfoConstSharedPtr cluster,
Random::RandomGenerator& random_generator)
Random::RandomGenerator& random_generator,
Event::MockSchedulableCallback* upstream_ready_cb)
: FixedHttpConnPoolImpl(
Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000", dispatcher.timeSource()),
Upstream::ResourcePriority::Default, dispatcher, nullptr, nullptr, random_generator,
@@ -62,7 +63,8 @@ class ConnPoolImplForTest : public Event::TestUsingSimulatedTime, public FixedHt
return nullptr; // Not used: createCodecClient overloaded.
},
std::vector<Protocol>{Protocol::Http11}),
api_(Api::createApiForTest()), mock_dispatcher_(dispatcher) {}
api_(Api::createApiForTest()), mock_dispatcher_(dispatcher),
mock_upstream_ready_cb_(upstream_ready_cb) {}

~ConnPoolImplForTest() override {
EXPECT_EQ(0U, ready_clients_.size());
@@ -118,15 +120,17 @@ class ConnPoolImplForTest : public Event::TestUsingSimulatedTime, public FixedHt
}

void expectEnableUpstreamReady() {
EXPECT_CALL(mock_dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb_));
EXPECT_CALL(*mock_upstream_ready_cb_, scheduleCallbackCurrentIteration())
.Times(1)
.RetiresOnSaturation();
}

void expectAndRunUpstreamReady() { post_cb_(); }
void expectAndRunUpstreamReady() { mock_upstream_ready_cb_->invokeCallback(); }

Upstream::ClusterConnectivityState state_;
Api::ApiPtr api_;
Event::MockDispatcher& mock_dispatcher_;
Event::PostCb post_cb_;
Event::MockSchedulableCallback* mock_upstream_ready_cb_;
std::vector<TestCodecClient> test_clients_;
};

@@ -136,7 +140,9 @@ class ConnPoolImplForTest : public Event::TestUsingSimulatedTime, public FixedHt
class Http1ConnPoolImplTest : public testing::Test {
public:
Http1ConnPoolImplTest()
: conn_pool_(std::make_unique<ConnPoolImplForTest>(dispatcher_, cluster_, random_)) {}
: upstream_ready_cb_(new Event::MockSchedulableCallback(&dispatcher_)),
conn_pool_(std::make_unique<ConnPoolImplForTest>(dispatcher_, cluster_, random_,
upstream_ready_cb_)) {}

~Http1ConnPoolImplTest() override {
EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges()));
@@ -145,6 +151,7 @@ class Http1ConnPoolImplTest : public testing::Test {
NiceMock<Random::MockRandomGenerator> random_;
NiceMock<Event::MockDispatcher> dispatcher_;
std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};
Event::MockSchedulableCallback* upstream_ready_cb_;
std::unique_ptr<ConnPoolImplForTest> conn_pool_;
NiceMock<Runtime::MockLoader> runtime_;
};
@@ -243,14 +250,17 @@ TEST_F(Http1ConnPoolImplTest, DrainConnections) {
ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection);
r2.startRequest();

conn_pool_->expectEnableUpstreamReady();
r1.completeResponse(false);

// This will destroy the ready client and set requests remaining to 1 on the busy client.
conn_pool_->drainConnections();
EXPECT_CALL(*conn_pool_, onClientDestroy());
dispatcher_.clearDeferredDeleteList();
conn_pool_->expectAndRunUpstreamReady();

// This will destroy the busy client when the response finishes.
conn_pool_->expectEnableUpstreamReady();
r2.completeResponse(false);
EXPECT_CALL(*conn_pool_, onClientDestroy());
dispatcher_.clearDeferredDeleteList();
@@ -267,9 +277,11 @@ TEST_F(Http1ConnPoolImplTest, VerifyTimingStats) {

ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection);
r1.startRequest();
conn_pool_->expectEnableUpstreamReady();
r1.completeResponse(false);

EXPECT_CALL(*conn_pool_, onClientDestroy());
conn_pool_->expectAndRunUpstreamReady();
conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
dispatcher_.clearDeferredDeleteList();
}
@@ -293,7 +305,10 @@ TEST_F(Http1ConnPoolImplTest, VerifyAlpnFallback) {

// Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at
// our test transport socket factory.
conn_pool_ = std::make_unique<ConnPoolImplForTest>(dispatcher_, cluster_, random_);
// Recreate this to refresh expectation that the callback is scheduled and saved.
new Event::MockSchedulableCallback(&dispatcher_);
conn_pool_ =
std::make_unique<ConnPoolImplForTest>(dispatcher_, cluster_, random_, upstream_ready_cb_);
NiceMock<MockResponseDecoder> outer_decoder;
ConnPoolCallbacks callbacks;
conn_pool_->expectClientCreate(Protocol::Http11);
@@ -366,15 +381,18 @@ TEST_F(Http1ConnPoolImplTest, MultipleRequestAndResponse) {
// Request 1 should kick off a new connection.
ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection);
r1.startRequest();
conn_pool_->expectEnableUpstreamReady();
r1.completeResponse(false);

// Request 2 should not.
ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Immediate);
r2.startRequest();
conn_pool_->expectEnableUpstreamReady();
r2.completeResponse(true);

// Cause the connection to go away.
EXPECT_CALL(*conn_pool_, onClientDestroy());
conn_pool_->expectAndRunUpstreamReady();
conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
dispatcher_.clearDeferredDeleteList();
}
@@ -942,6 +960,7 @@ TEST_F(Http1ConnPoolImplTest, MaxRequestsPerConnection) {
EXPECT_CALL(callbacks.pool_ready_, ready());

conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);
conn_pool_->expectEnableUpstreamReady();
EXPECT_TRUE(
callbacks.outer_encoder_
->encodeHeaders(TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true)
@@ -1005,8 +1024,10 @@ TEST_F(Http1ConnPoolImplTest, DrainCallback) {
r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default);
EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value());

conn_pool_->expectEnableUpstreamReady();
EXPECT_CALL(drained, ready());
r1.startRequest();

r1.completeResponse(false);

EXPECT_CALL(*conn_pool_, onClientDestroy());
@@ -1090,22 +1111,26 @@ TEST_F(Http1ConnPoolImplTest, ActiveRequestHasActiveConnectionsTrue) {
EXPECT_TRUE(conn_pool_->hasActiveConnections());

// cleanup
conn_pool_->expectEnableUpstreamReady();
r1.completeResponse(false);
conn_pool_->drainConnections();
EXPECT_CALL(*conn_pool_, onClientDestroy());
dispatcher_.clearDeferredDeleteList();
conn_pool_->expectAndRunUpstreamReady();
}

TEST_F(Http1ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnections) {
ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection);
r1.startRequest();
conn_pool_->expectEnableUpstreamReady();
r1.completeResponse(false);

EXPECT_FALSE(conn_pool_->hasActiveConnections());

conn_pool_->drainConnections();
EXPECT_CALL(*conn_pool_, onClientDestroy());
dispatcher_.clearDeferredDeleteList();
conn_pool_->expectAndRunUpstreamReady();
}

TEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) {
@@ -1125,6 +1150,87 @@ TEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) {
EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_local_.value());
}

// Schedulable callback that can track its destruction.
class MockDestructSchedulableCallback : public Event::MockSchedulableCallback {
public:
MockDestructSchedulableCallback(Event::MockDispatcher* dispatcher)
: Event::MockSchedulableCallback(dispatcher) {}
MOCK_METHOD0(Die, void());

~MockDestructSchedulableCallback() override { Die(); }
};

class Http1ConnPoolDestructImplTest : public testing::Test {
public:
Http1ConnPoolDestructImplTest()
: upstream_ready_cb_(new MockDestructSchedulableCallback(&dispatcher_)),
conn_pool_(std::make_unique<ConnPoolImplForTest>(dispatcher_, cluster_, random_,
upstream_ready_cb_)) {}

~Http1ConnPoolDestructImplTest() override {
EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges()));
}

NiceMock<Random::MockRandomGenerator> random_;
NiceMock<Event::MockDispatcher> dispatcher_;
std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};
MockDestructSchedulableCallback* upstream_ready_cb_;
std::unique_ptr<ConnPoolImplForTest> conn_pool_;
NiceMock<Runtime::MockLoader> runtime_;
};

// Regression test for use after free when dispatcher executes onUpstreamReady after connection pool
// is destroyed.
TEST_F(Http1ConnPoolDestructImplTest, CbAfterConnPoolDestroyed) {
InSequence s;

NiceMock<MockResponseDecoder> outer_decoder;
ConnPoolCallbacks callbacks;
conn_pool_->expectClientCreate();
Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);
EXPECT_NE(nullptr, handle);

NiceMock<MockRequestEncoder> request_encoder;
ResponseDecoder* inner_decoder;
EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))
.WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));
EXPECT_CALL(callbacks.pool_ready_, ready());
EXPECT_CALL(*conn_pool_->test_clients_[0].connect_timer_, disableTimer());
conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);

EXPECT_TRUE(
callbacks.outer_encoder_
->encodeHeaders(TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true)
.ok());

conn_pool_->expectEnableUpstreamReady();
// Schedules the onUpstreamReady callback.
inner_decoder->decodeHeaders(
ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true);

// Delete the connection pool.
EXPECT_CALL(*conn_pool_, onClientDestroy());
conn_pool_->destructAllConnections();

// Delete connection pool and check that scheduled callback onUpstreamReady was destroyed.
dispatcher_.deferredDelete(std::move(conn_pool_));
EXPECT_CALL(*upstream_ready_cb_, Die());

// When the dispatcher removes the connection pool, another call to clearDeferredDeleteList()
// occurs in ~HttpConnPoolImplBase. Avoid recursion.
bool deferring_delete = false;
ON_CALL(dispatcher_, clearDeferredDeleteList())
.WillByDefault(Invoke([this, &deferring_delete]() -> void {
if (deferring_delete) {
return;
}
deferring_delete = true;
dispatcher_.to_delete_.clear();
deferring_delete = false;
}));
dispatcher_.clearDeferredDeleteList();
}

} // namespace
} // namespace Http1
} // namespace Http
3 changes: 3 additions & 0 deletions test/common/http/http2/conn_pool_test.cc
@@ -74,6 +74,7 @@ class Http2ConnPoolImplTest : public Event::TestUsingSimulatedTime, public testi

Http2ConnPoolImplTest()
: api_(Api::createApiForTest(stats_store_)),
upstream_ready_cb_(new NiceMock<Event::MockSchedulableCallback>(&dispatcher_)),
pool_(std::make_unique<TestConnPoolImpl>(dispatcher_, random_, host_,
Upstream::ResourcePriority::Default, nullptr,
nullptr, state_)) {
@@ -212,6 +213,7 @@ class Http2ConnPoolImplTest : public Event::TestUsingSimulatedTime, public testi
NiceMock<Event::MockDispatcher> dispatcher_;
std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};
Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80", simTime())};
NiceMock<Event::MockSchedulableCallback>* upstream_ready_cb_;
std::unique_ptr<TestConnPoolImpl> pool_;
std::vector<TestCodecClient> test_clients_;
NiceMock<Runtime::MockLoader> runtime_;
@@ -337,6 +339,7 @@ TEST_F(Http2ConnPoolImplTest, VerifyAlpnFallback) {
// Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at
// our test transport socket factory.
host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80", simTime());
new NiceMock<Event::MockSchedulableCallback>(&dispatcher_);
pool_ = std::make_unique<TestConnPoolImpl>(
dispatcher_, random_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr, state_);

4 changes: 3 additions & 1 deletion test/common/http/mixed_conn_pool_test.cc
@@ -39,7 +39,8 @@ class ConnPoolImplForTest : public Event::TestUsingSimulatedTime, public HttpCon
class MixedConnPoolImplTest : public testing::Test {
public:
MixedConnPoolImplTest()
: conn_pool_(std::make_unique<ConnPoolImplForTest>(dispatcher_, state_, random_, cluster_)) {}
: upstream_ready_cb_(new NiceMock<Event::MockSchedulableCallback>(&dispatcher_)),
conn_pool_(std::make_unique<ConnPoolImplForTest>(dispatcher_, state_, random_, cluster_)) {}

~MixedConnPoolImplTest() override {
EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges()));
Expand All @@ -48,6 +49,7 @@ class MixedConnPoolImplTest : public testing::Test {
Upstream::ClusterConnectivityState state_;
NiceMock<Event::MockDispatcher> dispatcher_;
std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};
NiceMock<Event::MockSchedulableCallback>* upstream_ready_cb_;
std::unique_ptr<ConnPoolImplForTest> conn_pool_;
NiceMock<Runtime::MockLoader> runtime_;
NiceMock<Random::MockRandomGenerator> random_;
