diff --git a/folly/experimental/AtomicReadMostlyMainPtr.h b/folly/experimental/AtomicReadMostlyMainPtr.h
index 0b8b894346d..79f81b72da1 100644
--- a/folly/experimental/AtomicReadMostlyMainPtr.h
+++ b/folly/experimental/AtomicReadMostlyMainPtr.h
@@ -143,14 +143,16 @@ class AtomicReadMostlyMainPtr {
     DCHECK(newMain.get() == nullptr)
         << "Invariant should ensure that at most one version is non-null";
     newMain.reset(std::move(ptr));
-    // If order is acq_rel, it should degrade to just release, since this is a
-    // store rather than an RMW. (Of course, this is such a slow method that we
-    // don't really care, but precision is its own reward. If TSAN one day
-    // understands asymmetric barriers, this will also improve its error
-    // detection here). We get our "acquire-y-ness" from the mutex.
+    // If order is acq_rel, it should degrade to just release, and if acquire to
+    // relaxed, since this is a store rather than an RMW. (Of course, this is
+    // such a slow method that we don't really care, but precision is its own
+    // reward. If TSAN one day understands asymmetric barriers, this will also
+    // improve its error detection here). We get our "acquire-y-ness" from the
+    // mutex.
     auto realOrder =
-        (order == std::memory_order_acq_rel ? std::memory_order_release
-                                            : order);
+        (order == std::memory_order_acq_rel   ? std::memory_order_release
+         : order == std::memory_order_acquire ? std::memory_order_relaxed
+                                              : order);
     // After this, read-side critical sections can access both versions, but
     // new ones will use newMain.
     // This is also synchronization point with loads.
diff --git a/folly/experimental/coro/Retry.h b/folly/experimental/coro/Retry.h
index fbf0991e5a8..8209d4e3b9c 100644
--- a/folly/experimental/coro/Retry.h
+++ b/folly/experimental/coro/Retry.h
@@ -161,16 +161,18 @@ class ExponentialBackoffWithJitter {
         decider_(static_cast<Decider2&&>(decider)) {}
 
   Task<void> operator()(exception_wrapper&& ew) & {
+    using dist = std::normal_distribution<double>;
+
     if (retryCount_ == maxRetries_ || !decider_(ew)) {
       co_yield folly::coro::co_error(std::move(ew));
     }
 
     ++retryCount_;
 
-    auto dist = std::normal_distribution<double>(0.0, relativeJitterStdDev_);
-    // The jitter will be a value between [e^-stdev]
-    auto jitter = std::exp(dist(randomGen_));
+    auto jitter = relativeJitterStdDev_ > 0
+        ? std::exp(dist{0., relativeJitterStdDev_}(randomGen_))
+        : 1.;
 
     auto backoffRep =
         jitter * minBackoff_.count() * std::pow(2, retryCount_ - 1u);
 
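Note on the Retry.h hunk above: std::normal_distribution requires a positive
stddev, so constructing one from a relativeJitterStdDev_ of zero was outside
its contract; the new guard skips sampling entirely in that case. Below is a
minimal standalone sketch of the resulting backoff computation -- the function
name is illustrative and std::mt19937 merely stands in for the class's URNG
member; it is not folly's implementation.

    #include <chrono>
    #include <cmath>
    #include <cstdint>
    #include <random>

    // Lognormal jitter around an exponential backoff, mirroring the hunk
    // above. retryCount is the 1-based attempt number.
    std::chrono::milliseconds backoffWithJitter(
        std::mt19937& rng,
        std::chrono::milliseconds minBackoff,
        uint32_t retryCount,
        double relativeJitterStdDev) {
      using dist = std::normal_distribution<double>;
      // Skip sampling when the stddev is zero: normal_distribution requires
      // a positive stddev, and exp(0) == 1 is the correct no-jitter
      // multiplier in any case.
      double jitter = relativeJitterStdDev > 0
          ? std::exp(dist{0., relativeJitterStdDev}(rng))
          : 1.;
      return std::chrono::milliseconds(static_cast<int64_t>(
          jitter * minBackoff.count() * std::pow(2, retryCount - 1u)));
    }

With stddev s, exp of an N(0, s) sample is lognormal with median exp(0) = 1,
so the guard's fallback of 1.0 matches the median of the jittered case.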
diff --git a/folly/experimental/test/JemallocHugePageAllocatorTest.cpp b/folly/experimental/test/JemallocHugePageAllocatorTest.cpp
index a7fa25e120e..2a109a72f4a 100644
--- a/folly/experimental/test/JemallocHugePageAllocatorTest.cpp
+++ b/folly/experimental/test/JemallocHugePageAllocatorTest.cpp
@@ -251,11 +251,11 @@ TEST(JemallocHugePageAllocatorTest, STLAllocator) {
   // This should work, just won't get huge pages since
   // init hasn't been called yet
   vec.reserve(100);
-  EXPECT_NE(nullptr, &vec[0]);
+  EXPECT_NE(nullptr, vec.data());
 
   // Reserve & initialize, not on huge pages
   MyVec vec2(100);
-  EXPECT_NE(nullptr, &vec[0]);
+  EXPECT_NE(nullptr, vec.data());
 
   // F14 maps need quite a lot of memory by default
   bool initialized = jha::init(4);
@@ -265,7 +265,7 @@ TEST(JemallocHugePageAllocatorTest, STLAllocator) {
 
   // Reallocate, this time on huge pages
   vec.reserve(200);
-  EXPECT_NE(nullptr, &vec[0]);
+  EXPECT_NE(nullptr, vec.data());
 
   MyMap map1;
   map1[0] = {1, 2, 3};
@@ -274,7 +274,7 @@ TEST(JemallocHugePageAllocatorTest, STLAllocator) {
   map2[0] = {1, 2, 3};
 
   if (initialized) {
-    EXPECT_TRUE(jha::addressInArena(&vec[0]));
+    EXPECT_TRUE(jha::addressInArena(vec.data()));
     EXPECT_TRUE(jha::addressInArena(&map1[0]));
     EXPECT_TRUE(jha::addressInArena(&map1[0][0]));
     EXPECT_TRUE(jha::addressInArena(&map2[0]));
diff --git a/folly/io/async/test/AsyncUDPSocketTest.cpp b/folly/io/async/test/AsyncUDPSocketTest.cpp
index 5611dc7ec24..52db2f40805 100644
--- a/folly/io/async/test/AsyncUDPSocketTest.cpp
+++ b/folly/io/async/test/AsyncUDPSocketTest.cpp
@@ -368,24 +368,13 @@ class UDPNotifyClient : public UDPClient {
   }
 
   void onRecvMmsg(AsyncUDPSocket& sock) {
-    std::vector<struct mmsghdr> msgs;
-    msgs.reserve(numMsgs_);
-    memset(msgs.data(), 0, sizeof(struct mmsghdr) * numMsgs_);
-
     const socklen_t addrLen = sizeof(struct sockaddr_storage);
 
     const size_t dataSize = 1024;
-    std::vector<char> buf;
-    buf.reserve(numMsgs_ * dataSize);
-    memset(buf.data(), 0, numMsgs_ * dataSize);
-
-    std::vector<struct sockaddr_storage> addrs;
-    addrs.reserve(numMsgs_);
-    memset(addrs.data(), 0, sizeof(struct sockaddr_storage) * numMsgs_);
-
-    std::vector<struct iovec> iovecs;
-    iovecs.reserve(numMsgs_);
-    memset(iovecs.data(), 0, sizeof(struct iovec) * numMsgs_);
+    std::vector<char> buf(numMsgs_ * dataSize);
+    std::vector<struct mmsghdr> msgs(numMsgs_);
+    std::vector<struct sockaddr_storage> addrs(numMsgs_);
+    std::vector<struct iovec> iovecs(numMsgs_);
 
     for (unsigned int i = 0; i < numMsgs_; ++i) {
       struct msghdr* msg = &msgs[i].msg_hdr;
diff --git a/folly/json_pointer.cpp b/folly/json_pointer.cpp
index 312ab48fbec..5e4e5c3060d 100644
--- a/folly/json_pointer.cpp
+++ b/folly/json_pointer.cpp
@@ -83,9 +83,10 @@ json_pointer::json_pointer(std::vector<std::string> tokens) noexcept
 
 // private, static
 bool json_pointer::unescape(std::string& str) {
-  char const* end = &str[str.size()];
-  char* out = &str.front();
-  char const* decode = out;
+  char* out = &str[0];
+  char const* begin = out;
+  char const* end = begin + str.size();
+  char const* decode = begin;
   while (decode < end) {
     if (*decode != '~') {
       *out++ = *decode++;
@@ -106,7 +107,7 @@ bool json_pointer::unescape(std::string& str) {
     }
     decode += 2;
   }
-  str.resize(out - &str.front());
+  str.resize(out - begin);
   return true;
 }
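Note on the last three files: they share one theme -- taking the address of
element 0 of a container whose size may be zero. In AsyncUDPSocketTest the old
code additionally memset() the raw capacity of reserved-but-unsized vectors
and then indexed past size(), both of which step outside the containers'
contract; constructing the vectors with a size value-initializes (zero-fills)
the elements, making the memset calls unnecessary. A standalone sketch of the
core distinction (illustrative, not part of the patch):

    #include <string>
    #include <vector>

    int main() {
      std::vector<int> vec;
      vec.reserve(100);
      // size() is still 0 after reserve(), so vec[0] -- and therefore
      // &vec[0] -- is undefined behavior. data() is always valid to call;
      // here it points at the reserved buffer, which is what the test's
      // EXPECT_NE(nullptr, vec.data()) relies on.
      int* p = vec.data();

      std::string str;
      // str[0] is defined even for an empty string (it names the null
      // terminator), but str.front() on an empty string is not; hence
      // json_pointer::unescape now anchors its pointers on &str[0].
      char* out = &str[0];

      (void)p;
      (void)out;
      return 0;
    }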