diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake index f0d984a..47f3bee 100644 --- a/CMake/AbseilDll.cmake +++ b/CMake/AbseilDll.cmake @@ -28,7 +28,6 @@ set(ABSL_INTERNAL_DLL_FILES "base/internal/low_level_scheduling.h" "base/internal/nullability_impl.h" "base/internal/per_thread_tls.h" - "base/internal/prefetch.h" "base/prefetch.h" "base/internal/pretty_function.h" "base/internal/raw_logging.cc" @@ -44,7 +43,6 @@ set(ABSL_INTERNAL_DLL_FILES "base/internal/spinlock_wait.h" "base/internal/sysinfo.cc" "base/internal/sysinfo.h" - "base/internal/thread_annotations.h" "base/internal/thread_identity.cc" "base/internal/thread_identity.h" "base/internal/throw_delegate.cc" @@ -57,6 +55,7 @@ set(ABSL_INTERNAL_DLL_FILES "base/log_severity.cc" "base/log_severity.h" "base/macros.h" + "base/no_destructor.h" "base/nullability.h" "base/optimization.h" "base/options.h" @@ -77,7 +76,6 @@ set(ABSL_INTERNAL_DLL_FILES "container/internal/common_policy_traits.h" "container/internal/compressed_tuple.h" "container/internal/container_memory.h" - "container/internal/counting_allocator.h" "container/internal/hash_function_defaults.h" "container/internal/hash_policy_traits.h" "container/internal/hashtable_debug.h" @@ -109,7 +107,7 @@ set(ABSL_INTERNAL_DLL_FILES "crc/internal/crc_x86_arm_combined.cc" "crc/internal/crc_memcpy_fallback.cc" "crc/internal/crc_memcpy.h" - "crc/internal/crc_memcpy_x86_64.cc" + "crc/internal/crc_memcpy_x86_arm_combined.cc" "crc/internal/crc_non_temporal_memcpy.cc" "crc/internal/crc_x86_arm_combined.cc" "crc/internal/non_temporal_arm_intrinsics.h" @@ -141,6 +139,7 @@ set(ABSL_INTERNAL_DLL_FILES "functional/function_ref.h" "functional/internal/any_invocable.h" "functional/internal/function_ref.h" + "functional/overload.h" "hash/hash.h" "hash/internal/city.h" "hash/internal/city.cc" @@ -151,6 +150,7 @@ set(ABSL_INTERNAL_DLL_FILES "hash/internal/low_level_hash.cc" "log/absl_check.h" "log/absl_log.h" + "log/absl_vlog_is_on.h" "log/check.h" "log/die_if_null.cc" "log/die_if_null.h" @@ -163,6 +163,8 @@ set(ABSL_INTERNAL_DLL_FILES "log/internal/conditions.cc" "log/internal/conditions.h" "log/internal/config.h" + "log/internal/fnmatch.h" + "log/internal/fnmatch.cc" "log/internal/globals.cc" "log/internal/globals.h" "log/internal/log_format.cc" @@ -179,6 +181,8 @@ set(ABSL_INTERNAL_DLL_FILES "log/internal/proto.cc" "log/internal/strip.h" "log/internal/structured.h" + "log/internal/vlog_config.cc" + "log/internal/vlog_config.h" "log/internal/voidify.h" "log/initialize.cc" "log/initialize.h" @@ -190,6 +194,7 @@ set(ABSL_INTERNAL_DLL_FILES "log/log_sink_registry.h" "log/log_streamer.h" "log/structured.h" + "log/vlog_is_on.h" "memory/memory.h" "meta/type_traits.h" "numeric/bits.h" @@ -250,6 +255,7 @@ set(ABSL_INTERNAL_DLL_FILES "random/uniform_real_distribution.h" "random/zipf_distribution.h" "status/internal/status_internal.h" + "status/internal/status_internal.cc" "status/internal/statusor_internal.h" "status/status.h" "status/status.cc" @@ -261,6 +267,7 @@ set(ABSL_INTERNAL_DLL_FILES "strings/ascii.h" "strings/charconv.cc" "strings/charconv.h" + "strings/charset.h" "strings/cord.cc" "strings/cord.h" "strings/cord_analysis.cc" @@ -287,9 +294,6 @@ set(ABSL_INTERNAL_DLL_FILES "strings/internal/cord_rep_consume.h" "strings/internal/cord_rep_consume.cc" "strings/internal/cord_rep_flat.h" - "strings/internal/cord_rep_ring.cc" - "strings/internal/cord_rep_ring.h" - "strings/internal/cord_rep_ring_reader.h" "strings/internal/cordz_functions.cc" "strings/internal/cordz_functions.h" 
"strings/internal/cordz_handle.cc" @@ -308,6 +312,8 @@ set(ABSL_INTERNAL_DLL_FILES "strings/internal/stringify_sink.h" "strings/internal/stringify_sink.cc" "strings/internal/has_absl_stringify.h" + "strings/has_absl_stringify.h" + "strings/has_ostream_operator.h" "strings/match.cc" "strings/match.h" "strings/numbers.cc" @@ -325,7 +331,6 @@ set(ABSL_INTERNAL_DLL_FILES "strings/strip.h" "strings/substitute.cc" "strings/substitute.h" - "strings/internal/char_map.h" "strings/internal/escaping.h" "strings/internal/escaping.cc" "strings/internal/memutil.cc" @@ -421,11 +426,6 @@ set(ABSL_INTERNAL_DLL_FILES "types/bad_variant_access.cc" "types/bad_variant_access.h" "types/compare.h" - "types/internal/conformance_aliases.h" - "types/internal/conformance_archetype.h" - "types/internal/conformance_profile.h" - "types/internal/parentheses.h" - "types/internal/transform_args.h" "types/internal/variant.h" "types/optional.h" "types/internal/optional.h" @@ -627,17 +627,32 @@ include(CheckCXXSourceCompiles) check_cxx_source_compiles( [==[ #ifdef _MSC_VER -# if _MSVC_LANG < 201700L +# if _MSVC_LANG < 201703L # error "The compiler defaults or is configured for C++ < 17" # endif -#elif __cplusplus < 201700L +#elif __cplusplus < 201703L # error "The compiler defaults or is configured for C++ < 17" #endif int main() { return 0; } ]==] ABSL_INTERNAL_AT_LEAST_CXX17) -if(ABSL_INTERNAL_AT_LEAST_CXX17) +check_cxx_source_compiles( + [==[ +#ifdef _MSC_VER +# if _MSVC_LANG < 202002L +# error "The compiler defaults or is configured for C++ < 20" +# endif +#elif __cplusplus < 202002L +# error "The compiler defaults or is configured for C++ < 20" +#endif +int main() { return 0; } +]==] + ABSL_INTERNAL_AT_LEAST_CXX20) + +if(ABSL_INTERNAL_AT_LEAST_CXX20) + set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_20) +elseif(ABSL_INTERNAL_AT_LEAST_CXX17) set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_17) else() set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_14) @@ -807,8 +822,8 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") if(ABSL_PROPAGATE_CXX_STD) # Abseil libraries require C++14 as the current minimum standard. When - # compiled with C++17 (either because it is the compiler's default or - # explicitly requested), then Abseil requires C++17. + # compiled with a higher minimum (either because it is the compiler's + # default or explicitly requested), then Abseil requires that standard. target_compile_features(${_dll} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE}) endif() diff --git a/CMake/AbseilHelpers.cmake b/CMake/AbseilHelpers.cmake index 3bd33ce..c53b358 100644 --- a/CMake/AbseilHelpers.cmake +++ b/CMake/AbseilHelpers.cmake @@ -80,7 +80,7 @@ endif() # absl::fantastic_lib # ) # -# TODO: Implement "ALWAYSLINK" +# TODO(b/320467376): Implement "ALWAYSLINK". function(absl_cc_library) cmake_parse_arguments(ABSL_CC_LIB "DISABLE_INSTALL;PUBLIC;TESTONLY" @@ -287,8 +287,8 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") if(ABSL_PROPAGATE_CXX_STD) # Abseil libraries require C++14 as the current minimum standard. When - # compiled with C++17 (either because it is the compiler's default or - # explicitly requested), then Abseil requires C++17. + # compiled with a higher standard (either because it is the compiler's + # default or explicitly requested), then Abseil requires that standard. 
target_compile_features(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE}) endif() @@ -298,7 +298,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") if(ABSL_ENABLE_INSTALL) set_target_properties(${_NAME} PROPERTIES OUTPUT_NAME "absl_${_NAME}" - SOVERSION "2308.0.0" + SOVERSION "2401.0.0" ) endif() else() diff --git a/CMakeLists.txt b/CMakeLists.txt index eef6626..194f870 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -33,6 +33,11 @@ if (POLICY CMP0048) cmake_policy(SET CMP0048 NEW) endif (POLICY CMP0048) +# Honor the GTest_ROOT variable if specified +if (POLICY CMP0074) + cmake_policy(SET CMP0074 NEW) +endif (POLICY CMP0074) + # option() honor variables if (POLICY CMP0077) cmake_policy(SET CMP0077 NEW) @@ -53,7 +58,7 @@ if (POLICY CMP0141) cmake_policy(SET CMP0141 NEW) endif (POLICY CMP0141) -project(absl LANGUAGES CXX VERSION 20230802) +project(absl LANGUAGES CXX VERSION 20240116) include(CTest) # Output directory is correct by default for most build setups. However, when @@ -221,20 +226,41 @@ if(ABSL_ENABLE_INSTALL) PATTERN "testdata" EXCLUDE ) + # Rewrite options.h to use the compiled ABI. file(READ "absl/base/options.h" ABSL_INTERNAL_OPTIONS_H_CONTENTS) - if (ABSL_INTERNAL_AT_LEAST_CXX17) - string(REGEX REPLACE - "#define ABSL_OPTION_USE_STD_([^ ]*) 2" - "#define ABSL_OPTION_USE_STD_\\1 1" + + # Handle features that require at least C++20. + if (ABSL_INTERNAL_AT_LEAST_CXX20) + foreach(FEATURE "ORDERING") + string(REPLACE + "#define ABSL_OPTION_USE_STD_${FEATURE} 2" + "#define ABSL_OPTION_USE_STD_${FEATURE} 1" ABSL_INTERNAL_OPTIONS_H_PINNED "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}") - else() - string(REGEX REPLACE - "#define ABSL_OPTION_USE_STD_([^ ]*) 2" - "#define ABSL_OPTION_USE_STD_\\1 0" + set(ABSL_INTERNAL_OPTIONS_H_CONTENTS "${ABSL_INTERNAL_OPTIONS_H_PINNED}") + endforeach() + endif() + + # Handle features that require at least C++17. + if (ABSL_INTERNAL_AT_LEAST_CXX17) + foreach(FEATURE "ANY" "OPTIONAL" "STRING_VIEW" "VARIANT") + string(REPLACE + "#define ABSL_OPTION_USE_STD_${FEATURE} 2" + "#define ABSL_OPTION_USE_STD_${FEATURE} 1" ABSL_INTERNAL_OPTIONS_H_PINNED "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}") + set(ABSL_INTERNAL_OPTIONS_H_CONTENTS "${ABSL_INTERNAL_OPTIONS_H_PINNED}") + endforeach() endif() + + # Any feature that still has the value of 2 (because it was not handled above) + # should be set to 0. + string(REGEX REPLACE + "#define ABSL_OPTION_USE_STD_([^ ]*) 2" + "#define ABSL_OPTION_USE_STD_\\1 0" + ABSL_INTERNAL_OPTIONS_H_PINNED + "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}") + file(WRITE "${CMAKE_BINARY_DIR}/options-pinned.h" "${ABSL_INTERNAL_OPTIONS_H_PINNED}") install(FILES "${CMAKE_BINARY_DIR}/options-pinned.h" diff --git a/MODULE.bazel b/MODULE.bazel new file mode 100644 index 0000000..efbc88b --- /dev/null +++ b/MODULE.bazel @@ -0,0 +1,39 @@ +# Copyright 2024 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
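An aside on the options.h pinning in the CMakeLists.txt hunk above: each `ABSL_OPTION_USE_STD_*` macro is rewritten from 2 (detect at preprocessing time) to a hard-coded 0 or 1 matching the standard the library was compiled with, so installed headers can never disagree with the compiled ABI; `ORDERING` is only pinned to 1 for C++20 builds. A minimal sketch of the consumer-visible effect, assuming an installation built and pinned as C++20:

```cpp
#include <compare>
#include <type_traits>

#include "absl/types/compare.h"

// With ABSL_OPTION_USE_STD_ORDERING pinned to 1 at install time, the absl
// ordering names are aliases of the C++20 std:: types.
static_assert(
    std::is_same<absl::strong_ordering, std::strong_ordering>::value,
    "expected for an installation pinned to C++20");
```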
+ +# https://bazel.build/external/overview#bzlmod + +module( + name = "abseil-cpp", + version = "20240116.0", + compatibility_level = 1, +) + +# Only direct dependencies need to be listed below. +# Please keep the versions in sync with the versions in the WORKSPACE file. + +bazel_dep(name = "bazel_skylib", + version = "1.5.0") + +bazel_dep(name = "google_benchmark", + version = "1.8.3", + repo_name = "com_github_google_benchmark", + dev_dependency = True) + +bazel_dep(name = "googletest", + version = "1.14.0.bcr.1", + repo_name = "com_google_googletest") + +bazel_dep(name = "platforms", + version = "0.0.8") diff --git a/PrivacyInfo.xcprivacy b/PrivacyInfo.xcprivacy index 3ff4a9d..6af1641 100644 --- a/PrivacyInfo.xcprivacy +++ b/PrivacyInfo.xcprivacy @@ -2,13 +2,13 @@ - NSPrivacyTracking - - NSPrivacyCollectedDataTypes - - NSPrivacyTrackingDomains - - NSPrivacyAccessedAPITypes - + NSPrivacyTracking + + NSPrivacyCollectedDataTypes + + NSPrivacyTrackingDomains + + NSPrivacyAccessedAPITypes + diff --git a/README.md b/README.md index 075b85a..f834fcd 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,3 @@ -# Swift Package Manager copy repository - -This repository is a copy of https://github.com/firebase/abseil-cpp with -the git metadata removed to provide one to two orders of magnitude increase in -speed for Swift Package Manager usage. - -This repo is not supported as a direct dependency for non-Google usage. - -There should be no changes to this repo other than updates from its mirror -and Swift Package Manager specific items. - # Abseil - C++ Common Libraries The repository contains the Abseil C++ library code. Abseil is an open-source diff --git a/WORKSPACE b/WORKSPACE index fdb615f..0d88609 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -20,41 +20,40 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # GoogleTest/GoogleMock framework. Used by most unit-tests. http_archive( - name = "com_google_googletest", # 2023-08-02T16:45:10Z - sha256 = "1f357c27ca988c3f7c6b4bf68a9395005ac6761f034046e9dde0896e3aba00e4", - strip_prefix = "googletest-1.14.0", - # Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh. - urls = ["https://github.com/google/googletest/archive/refs/tags/v1.14.0.zip"], + name = "com_google_googletest", + sha256 = "8ad598c73ad796e0d8280b082cebd82a630d73e73cd3c70057938a6501bba5d7", + strip_prefix = "googletest-1.14.0", + # Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh and + # ci/windows_msvc_cmake.bat. + urls = ["https://github.com/google/googletest/archive/refs/tags/v1.14.0.tar.gz"], ) # RE2 (the regular expression library used by GoogleTest) http_archive( - name = "com_googlesource_code_re2", # 2023-03-17T11:36:51Z - sha256 = "cb8b5312a65f2598954545a76e8bce913f35fbb3a21a5c88797a4448e9f9b9d9", - strip_prefix = "re2-578843a516fd1da7084ae46209a75f3613b6065e", - urls = ["https://github.com/google/re2/archive/578843a516fd1da7084ae46209a75f3613b6065e.zip"], + name = "com_googlesource_code_re2", + sha256 = "828341ad08524618a626167bd320b0c2acc97bd1c28eff693a9ea33a7ed2a85f", + strip_prefix = "re2-2023-11-01", + urls = ["https://github.com/google/re2/releases/download/2023-11-01/re2-2023-11-01.zip"], ) # Google benchmark. 
http_archive( - name = "com_github_google_benchmark", # 2023-08-01T07:47:09Z - sha256 = "db1e39ee71dc38aa7e57ed007f2c8b3bb59e13656435974781a9dc0617d75cc9", - strip_prefix = "benchmark-02a354f3f323ae8256948e1dc77ddcb1dfc297da", - urls = ["https://github.com/google/benchmark/archive/02a354f3f323ae8256948e1dc77ddcb1dfc297da.zip"], + name = "com_github_google_benchmark", + sha256 = "6bc180a57d23d4d9515519f92b0c83d61b05b5bab188961f36ac7b06b0d9e9ce", + strip_prefix = "benchmark-1.8.3", + urls = ["https://github.com/google/benchmark/archive/refs/tags/v1.8.3.tar.gz"], ) # Bazel Skylib. http_archive( - name = "bazel_skylib", # 2023-05-31T19:24:07Z - sha256 = "08c0386f45821ce246bbbf77503c973246ed6ee5c3463e41efc197fa9bc3a7f4", - strip_prefix = "bazel-skylib-288731ef9f7f688932bd50e704a91a45ec185f9b", - urls = ["https://github.com/bazelbuild/bazel-skylib/archive/288731ef9f7f688932bd50e704a91a45ec185f9b.zip"], + name = "bazel_skylib", + sha256 = "cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94", + urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz"], ) # Bazel platform rules. http_archive( - name = "platforms", # 2023-07-28T19:44:27Z - sha256 = "40eb313613ff00a5c03eed20aba58890046f4d38dec7344f00bb9a8867853526", - strip_prefix = "platforms-4ad40ef271da8176d4fc0194d2089b8a76e19d7b", - urls = ["https://github.com/bazelbuild/platforms/archive/4ad40ef271da8176d4fc0194d2089b8a76e19d7b.zip"], + name = "platforms", + sha256 = "8150406605389ececb6da07cbcb509d5637a3ab9a24bc69b1101531367d89d74", + urls = ["https://github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz"], ) diff --git a/absl/BUILD.bazel b/absl/BUILD.bazel index 253c0ae..14c30b3 100644 --- a/absl/BUILD.bazel +++ b/absl/BUILD.bazel @@ -68,6 +68,17 @@ config_setting( visibility = [":__subpackages__"], ) +# x64_windows-clang-cl - used for selecting clang-cl for CI builds +platform( + name = "x64_windows-clang-cl", + constraint_values = [ + "@platforms//cpu:x86_64", + "@platforms//os:windows", + "@bazel_tools//tools/cpp:clang-cl", + ], + visibility = [":__subpackages__"], +) + config_setting( name = "osx", constraint_values = [ diff --git a/absl/abseil.podspec.gen.py b/absl/abseil.podspec.gen.py index 6375298..c83edbf 100755 --- a/absl/abseil.podspec.gen.py +++ b/absl/abseil.podspec.gen.py @@ -30,6 +30,9 @@ :git => 'https://github.com/abseil/abseil-cpp.git', :tag => '${tag}', } + s.resource_bundles = { + s.module_name => 'PrivacyInfo.xcprivacy', + } s.module_name = 'absl' s.header_mappings_dir = 'absl' s.header_dir = 'absl' diff --git a/absl/algorithm/BUILD.bazel b/absl/algorithm/BUILD.bazel index 3a9ab01..ddf9e11 100644 --- a/absl/algorithm/BUILD.bazel +++ b/absl/algorithm/BUILD.bazel @@ -21,7 +21,14 @@ load( "ABSL_TEST_COPTS", ) -package(default_visibility = ["//visibility:public"]) +package( + default_visibility = ["//visibility:public"], + features = [ + "header_modules", + "layering_check", + "parse_headers", + ], +) licenses(["notice"]) @@ -44,24 +51,11 @@ cc_test( deps = [ ":algorithm", "//absl/base:config", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) -cc_binary( - name = "algorithm_benchmark", - testonly = 1, - srcs = ["equal_benchmark.cc"], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["benchmark"], - deps = [ - ":algorithm", - "//absl/base:core_headers", - "@com_github_google_benchmark//:benchmark_main", - ], -) - cc_library( name = "container", hdrs = [ @@ -72,6 +66,7 @@ cc_library( deps = 
[ ":algorithm", "//absl/base:core_headers", + "//absl/base:nullability", "//absl/meta:type_traits", ], ) @@ -87,6 +82,7 @@ cc_test( "//absl/base:core_headers", "//absl/memory", "//absl/types:span", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) diff --git a/absl/algorithm/CMakeLists.txt b/absl/algorithm/CMakeLists.txt index 181b49c..5577164 100644 --- a/absl/algorithm/CMakeLists.txt +++ b/absl/algorithm/CMakeLists.txt @@ -50,6 +50,7 @@ absl_cc_library( absl::algorithm absl::core_headers absl::meta + absl::nullability PUBLIC ) diff --git a/absl/algorithm/algorithm.h b/absl/algorithm/algorithm.h index e9b4733..59aeed7 100644 --- a/absl/algorithm/algorithm.h +++ b/absl/algorithm/algorithm.h @@ -31,92 +31,17 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace algorithm_internal { - -// Performs comparisons with operator==, similar to C++14's `std::equal_to<>`. -struct EqualTo { - template - bool operator()(const T& a, const U& b) const { - return a == b; - } -}; - -template -bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, - InputIter2 last2, Pred pred, std::input_iterator_tag, - std::input_iterator_tag) { - while (true) { - if (first1 == last1) return first2 == last2; - if (first2 == last2) return false; - if (!pred(*first1, *first2)) return false; - ++first1; - ++first2; - } -} - -template -bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, - InputIter2 last2, Pred&& pred, std::random_access_iterator_tag, - std::random_access_iterator_tag) { - return (last1 - first1 == last2 - first2) && - std::equal(first1, last1, first2, std::forward(pred)); -} - -// When we are using our own internal predicate that just applies operator==, we -// forward to the non-predicate form of std::equal. This enables an optimization -// in libstdc++ that can result in std::memcmp being used for integer types. -template -bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, - InputIter2 last2, algorithm_internal::EqualTo /* unused */, - std::random_access_iterator_tag, - std::random_access_iterator_tag) { - return (last1 - first1 == last2 - first2) && - std::equal(first1, last1, first2); -} - -template -It RotateImpl(It first, It middle, It last, std::true_type) { - return std::rotate(first, middle, last); -} - -template -It RotateImpl(It first, It middle, It last, std::false_type) { - std::rotate(first, middle, last); - return std::next(first, std::distance(middle, last)); -} - -} // namespace algorithm_internal - // equal() +// rotate() // -// Compares the equality of two ranges specified by pairs of iterators, using -// the given predicate, returning true iff for each corresponding iterator i1 -// and i2 in the first and second range respectively, pred(*i1, *i2) == true -// -// This comparison takes at most min(`last1` - `first1`, `last2` - `first2`) -// invocations of the predicate. Additionally, if InputIter1 and InputIter2 are -// both random-access iterators, and `last1` - `first1` != `last2` - `first2`, -// then the predicate is never invoked and the function returns false. +// Historical note: Abseil once provided implementations of these algorithms +// prior to their adoption in C++14. New code should prefer to use the std +// variants. // -// This is a C++11-compatible implementation of C++14 `std::equal`. See -// https://en.cppreference.com/w/cpp/algorithm/equal for more information. 
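Editorial note on the replacement below: `absl::equal` and `absl::rotate` become plain using-declarations for the std algorithms, so existing call sites compile unchanged. A minimal sketch of the C++14 four-iterator form that now backs `absl::equal`:

```cpp
#include <vector>

#include "absl/algorithm/algorithm.h"

bool SameContents(const std::vector<int>& a, const std::vector<int>& b) {
  // Resolves to std::equal. The four-iterator overload (C++14) handles
  // ranges of different lengths itself, comparing sizes up front when the
  // iterators are random-access.
  return absl::equal(a.begin(), a.end(), b.begin(), b.end());
}
```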
-template -bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, - InputIter2 last2, Pred&& pred) { - return algorithm_internal::EqualImpl( - first1, last1, first2, last2, std::forward(pred), - typename std::iterator_traits::iterator_category{}, - typename std::iterator_traits::iterator_category{}); -} - -// Overload of equal() that performs comparison of two ranges specified by pairs -// of iterators using operator==. -template -bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, - InputIter2 last2) { - return absl::equal(first1, last1, first2, last2, - algorithm_internal::EqualTo{}); -} +// See the documentation for the STL header for more information: +// https://en.cppreference.com/w/cpp/header/algorithm +using std::equal; +using std::rotate; // linear_search() // @@ -133,26 +58,6 @@ bool linear_search(InputIterator first, InputIterator last, return std::find(first, last, value) != last; } -// rotate() -// -// Performs a left rotation on a range of elements (`first`, `last`) such that -// `middle` is now the first element. `rotate()` returns an iterator pointing to -// the first element before rotation. This function is exactly the same as -// `std::rotate`, but fixes a bug in gcc -// <= 4.9 where `std::rotate` returns `void` instead of an iterator. -// -// The complexity of this algorithm is the same as that of `std::rotate`, but if -// `ForwardIterator` is not a random-access iterator, then `absl::rotate` -// performs an additional pass over the range to construct the return value. -template -ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, - ForwardIterator last) { - return algorithm_internal::RotateImpl( - first, middle, last, - std::is_same()); -} - ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/algorithm/container.h b/absl/algorithm/container.h index 679e026..c7bafae 100644 --- a/absl/algorithm/container.h +++ b/absl/algorithm/container.h @@ -52,6 +52,7 @@ #include "absl/algorithm/algorithm.h" #include "absl/base/macros.h" +#include "absl/base/nullability.h" #include "absl/meta/type_traits.h" namespace absl { @@ -116,18 +117,6 @@ template struct IsUnorderedContainer> : std::true_type {}; -// container_algorithm_internal::c_size. It is meant for internal use only. - -template -auto c_size(C& c) -> decltype(c.size()) { - return c.size(); -} - -template -constexpr std::size_t c_size(T (&)[N]) { - return N; -} - } // namespace container_algorithm_internal // PUBLIC API @@ -348,20 +337,10 @@ container_algorithm_internal::ContainerDifferenceType c_count_if( template container_algorithm_internal::ContainerIterPairType c_mismatch(C1& c1, C2& c2) { - auto first1 = container_algorithm_internal::c_begin(c1); - auto last1 = container_algorithm_internal::c_end(c1); - auto first2 = container_algorithm_internal::c_begin(c2); - auto last2 = container_algorithm_internal::c_end(c2); - - for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { - // Negates equality because Cpp17EqualityComparable doesn't require clients - // to overload both `operator==` and `operator!=`. 
- if (!(*first1 == *first2)) { - break; - } - } - - return std::make_pair(first1, first2); + return std::mismatch(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2)); } // Overload of c_mismatch() for using a predicate evaluation other than `==` as @@ -370,56 +349,33 @@ container_algorithm_internal::ContainerIterPairType c_mismatch(C1& c1, template container_algorithm_internal::ContainerIterPairType c_mismatch( C1& c1, C2& c2, BinaryPredicate pred) { - auto first1 = container_algorithm_internal::c_begin(c1); - auto last1 = container_algorithm_internal::c_end(c1); - auto first2 = container_algorithm_internal::c_begin(c2); - auto last2 = container_algorithm_internal::c_end(c2); - - for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { - if (!pred(*first1, *first2)) { - break; - } - } - - return std::make_pair(first1, first2); + return std::mismatch(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), pred); } // c_equal() // // Container-based version of the `std::equal()` function to // test whether two containers are equal. -// -// NOTE: the semantics of c_equal() are slightly different than those of -// equal(): while the latter iterates over the second container only up to the -// size of the first container, c_equal() also checks whether the container -// sizes are equal. This better matches expectations about c_equal() based on -// its signature. -// -// Example: -// vector v1 = <1, 2, 3>; -// vector v2 = <1, 2, 3, 4>; -// equal(std::begin(v1), std::end(v1), std::begin(v2)) returns true -// c_equal(v1, v2) returns false - template bool c_equal(const C1& c1, const C2& c2) { - return ((container_algorithm_internal::c_size(c1) == - container_algorithm_internal::c_size(c2)) && - std::equal(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2))); + return std::equal(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2)); } // Overload of c_equal() for using a predicate evaluation other than `==` as // the function's test condition. template bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) { - return ((container_algorithm_internal::c_size(c1) == - container_algorithm_internal::c_size(c2)) && - std::equal(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - std::forward(pred))); + return std::equal(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), + std::forward(pred)); } // c_is_permutation() @@ -428,20 +384,20 @@ bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) { // to test whether a container is a permutation of another. 
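The rewritten `c_equal` above drops the explicit size comparison because the four-iterator `std::equal` it now delegates to performs the same length check, so the semantics described in the removed NOTE are preserved. A quick sketch:

```cpp
#include <vector>

#include "absl/algorithm/container.h"

bool Demo() {
  std::vector<int> v1 = {1, 2, 3};
  std::vector<int> v2 = {1, 2, 3, 4};
  // Still false: the four-iterator std::equal detects the length mismatch,
  // just as the removed hand-written size check did.
  return absl::c_equal(v1, v2);
}
```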
template bool c_is_permutation(const C1& c1, const C2& c2) { - using std::begin; - using std::end; - return c1.size() == c2.size() && - std::is_permutation(begin(c1), end(c1), begin(c2)); + return std::is_permutation(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2)); } // Overload of c_is_permutation() for using a predicate evaluation other than // `==` as the function's test condition. template bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) { - using std::begin; - using std::end; - return c1.size() == c2.size() && - std::is_permutation(begin(c1), end(c1), begin(c2), + return std::is_permutation(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), std::forward(pred)); } @@ -818,6 +774,36 @@ void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) { std::forward(gen)); } +// c_sample() +// +// Container-based version of the `std::sample()` function to +// randomly sample elements from the container without replacement using a +// `gen()` uniform random number generator and write them to an iterator range. +template +OutputIterator c_sample(const C& c, OutputIterator result, Distance n, + UniformRandomBitGenerator&& gen) { +#if defined(__cpp_lib_sample) && __cpp_lib_sample >= 201603L + return std::sample(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, n, + std::forward(gen)); +#else + // Fall back to a stable selection-sampling implementation. + auto first = container_algorithm_internal::c_begin(c); + Distance unsampled_elements = c_distance(c); + n = (std::min)(n, unsampled_elements); + for (; n != 0; ++first) { + Distance r = + std::uniform_int_distribution(0, --unsampled_elements)(gen); + if (r < n) { + *result++ = *first; + --n; + } + } + return result; +#endif +} + //------------------------------------------------------------------------------ // Partition functions //------------------------------------------------------------------------------ @@ -1657,7 +1643,7 @@ bool c_prev_permutation(C& c, LessThan&& comp) { // // Container-based version of the `std::iota()` function // to compute successive values of `value`, as if incremented with `++value` -// after each element is written. and write them to the container. +// after each element is written, and write them to the container. 
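Usage sketch for the newly added `c_sample` (the surrounding function name below is illustrative): it defers to `std::sample` when the library provides it and otherwise falls back to selection sampling, so the result preserves the input's relative order either way.

```cpp
#include <iterator>
#include <random>
#include <vector>

#include "absl/algorithm/container.h"

std::vector<int> PickThree(const std::vector<int>& pool) {
  std::vector<int> out;
  std::mt19937 gen{std::random_device{}()};
  // Draws up to three elements without replacement; both std::sample and
  // the fallback visit elements in order, so relative order is preserved.
  absl::c_sample(pool, std::back_inserter(out), 3, gen);
  return out;
}
```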
template void c_iota(Sequence& sequence, const T& value) { std::iota(container_algorithm_internal::c_begin(sequence), diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel index fb008db..0eb735d 100644 --- a/absl/base/BUILD.bazel +++ b/absl/base/BUILD.bazel @@ -21,7 +21,14 @@ load( "ABSL_TEST_COPTS", ) -package(default_visibility = ["//visibility:public"]) +package( + default_visibility = ["//visibility:public"], + features = [ + "header_modules", + "layering_check", + "parse_headers", + ], +) licenses(["notice"]) @@ -62,6 +69,14 @@ cc_library( ], ) +cc_library( + name = "no_destructor", + hdrs = ["no_destructor.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [":config"], +) + cc_library( name = "nullability", srcs = ["internal/nullability_impl.h"], @@ -160,9 +175,6 @@ cc_library( cc_library( name = "core_headers", - srcs = [ - "internal/thread_annotations.h", - ], hdrs = [ "attributes.h", "const_init.h", @@ -273,6 +285,7 @@ cc_library( ":cycleclock_internal", ":dynamic_annotations", ":log_severity", + ":nullability", ":raw_logging_internal", ":spinlock_wait", "//absl/meta:type_traits", @@ -302,6 +315,7 @@ cc_test( ":atomic_hook", ":atomic_hook_test_helper", ":core_headers", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -317,6 +331,7 @@ cc_test( deps = [ ":base", ":core_headers", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -344,6 +359,7 @@ cc_test( deps = [ ":config", ":throw_delegate", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -357,6 +373,7 @@ cc_test( deps = [ ":errno_saver", ":strerror", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -380,7 +397,9 @@ cc_library( name = "pretty_function", hdrs = ["internal/pretty_function.h"], linkopts = ABSL_DEFAULT_LINKOPTS, - visibility = ["//absl:__subpackages__"], + visibility = [ + "//absl:__subpackages__", + ], ) cc_library( @@ -409,6 +428,7 @@ cc_test( deps = [ ":exception_safety_testing", "//absl/memory", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -426,6 +446,7 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":base_internal", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -440,6 +461,7 @@ cc_test( ":base_internal", "//absl/memory", "//absl/strings", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -478,6 +500,7 @@ cc_test( ":config", ":core_headers", "//absl/synchronization", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -494,6 +517,7 @@ cc_library( deps = [ ":base", ":base_internal", + ":no_destructor", ":raw_logging_internal", "//absl/synchronization", "@com_github_google_benchmark//:benchmark_main", @@ -525,6 +549,7 @@ cc_library( ":base", ":config", ":core_headers", + ":nullability", ], ) @@ -535,6 +560,7 @@ cc_test( deps = [ ":config", ":endian", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -547,6 +573,7 @@ cc_test( deps = [ ":config", "//absl/synchronization:thread_pool", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -560,16 +587,47 @@ cc_test( ":base", ":core_headers", "//absl/synchronization", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "no_destructor_test", + srcs = ["no_destructor_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ 
+ ":config", + ":no_destructor", + ":raw_logging_internal", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) +cc_binary( + name = "no_destructor_benchmark", + testonly = 1, + srcs = ["no_destructor_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":no_destructor", + ":raw_logging_internal", + "@com_github_google_benchmark//:benchmark_main", + ], +) + cc_test( name = "nullability_test", srcs = ["nullability_test.cc"], deps = [ ":core_headers", ":nullability", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -582,6 +640,7 @@ cc_test( deps = [ ":raw_logging_internal", "//absl/strings", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -595,6 +654,7 @@ cc_test( deps = [ ":base", "//absl/synchronization", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -628,6 +688,7 @@ cc_test( ":base", ":core_headers", "//absl/synchronization", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -643,6 +704,7 @@ cc_test( ":base", "//absl/synchronization", "@com_github_google_benchmark//:benchmark_main", + "@com_google_googletest//:gtest", ], ) @@ -669,6 +731,7 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":scoped_set_env", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -684,6 +747,7 @@ cc_test( "//absl/flags:flag_internal", "//absl/flags:marshalling", "//absl/strings", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -713,6 +777,7 @@ cc_test( deps = [ ":strerror", "//absl/strings", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -752,6 +817,7 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":fast_type_id", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -759,14 +825,13 @@ cc_test( cc_library( name = "prefetch", hdrs = [ - "internal/prefetch.h", "prefetch.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":config", - ":core_headers", # TODO(b/265984188): remove + ":core_headers", ], ) @@ -774,13 +839,13 @@ cc_test( name = "prefetch_test", size = "small", srcs = [ - "internal/prefetch_test.cc", "prefetch_test.cc", ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":prefetch", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -795,6 +860,7 @@ cc_test( deps = [ ":core_headers", "//absl/strings", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -808,6 +874,7 @@ cc_test( deps = [ ":core_headers", "//absl/types:optional", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt index 76c4ff1..4cfc228 100644 --- a/absl/base/CMakeLists.txt +++ b/absl/base/CMakeLists.txt @@ -49,11 +49,23 @@ absl_cc_library( SRCS "log_severity.cc" DEPS + absl::config absl::core_headers COPTS ${ABSL_DEFAULT_COPTS} ) +absl_cc_library( + NAME + no_destructor + HDRS + "no_destructor.h" + DEPS + absl::config + COPTS + ${ABSL_DEFAULT_COPTS} +) + absl_cc_library( NAME nullability @@ -155,7 +167,6 @@ absl_cc_library( "optimization.h" "port.h" "thread_annotations.h" - "internal/thread_annotations.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS @@ -236,6 +247,7 @@ absl_cc_library( absl::core_headers absl::dynamic_annotations 
     absl::log_severity
+    absl::nullability
     absl::raw_logging_internal
     absl::spinlock_wait
     absl::type_traits
@@ -464,6 +476,7 @@ absl_cc_library(
     absl::base
     absl::config
     absl::core_headers
+    absl::nullability
   PUBLIC
 )
 
@@ -508,6 +521,20 @@ absl_cc_test(
     GTest::gtest_main
 )
 
+absl_cc_test(
+  NAME
+    no_destructor_test
+  SRCS
+    "no_destructor_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::no_destructor
+    absl::config
+    absl::raw_logging_internal
+    GTest::gtest_main
+)
+
 absl_cc_test(
   NAME
     raw_logging_test
@@ -677,14 +704,13 @@ absl_cc_library(
     prefetch
   HDRS
     "prefetch.h"
-    "internal/prefetch.h"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::config
-    absl::core_headers  # TODO(b/265984188): remove
+    absl::core_headers
 )
 
 absl_cc_test(
@@ -692,7 +718,6 @@
     prefetch_test
   SRCS
     "prefetch_test.cc"
-    "internal/prefetch_test.cc"
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
diff --git a/absl/base/attributes.h b/absl/base/attributes.h
index a7f279a..d4f67a1 100644
--- a/absl/base/attributes.h
+++ b/absl/base/attributes.h
@@ -687,7 +687,7 @@
 // When deprecating Abseil code, it is sometimes necessary to turn off the
 // warning within Abseil, until the deprecated code is actually removed. The
-// deprecated code can be surrounded with these directives to acheive that
+// deprecated code can be surrounded with these directives to achieve that
 // result.
 //
 //   class ABSL_DEPRECATED("Use Bar instead") Foo;
@@ -747,9 +747,52 @@
 #define ABSL_CONST_INIT
 #endif
 
-// These annotations are not available yet due to fear of breaking code.
-#define ABSL_ATTRIBUTE_PURE_FUNCTION
-#define ABSL_ATTRIBUTE_CONST_FUNCTION
+// ABSL_ATTRIBUTE_PURE_FUNCTION
+//
+// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure"
+// functions. A function is pure if its return value is only a function of its
+// arguments. The pure attribute prohibits a function from modifying the state
+// of the program that is observable by means other than inspecting the
+// function's return value. Declaring such functions with the pure attribute
+// allows the compiler to avoid emitting some calls in repeated invocations of
+// the function with the same argument values.
+//
+// Example:
+//
+//  ABSL_ATTRIBUTE_PURE_FUNCTION std::string FormatTime(Time t);
+#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure)
+#define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]]
+#elif ABSL_HAVE_ATTRIBUTE(pure)
+#define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure))
+#else
+// If the attribute isn't defined, we'll fall back to ABSL_MUST_USE_RESULT,
+// since pure functions are useless if their return value is ignored.
+#define ABSL_ATTRIBUTE_PURE_FUNCTION ABSL_MUST_USE_RESULT
+#endif
+
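To make the new annotation concrete, here is a hedged sketch of a declaration that qualifies as pure; `BitParity` is a hypothetical function, not part of Abseil:

```cpp
#include "absl/base/attributes.h"

// Depends only on `x` and touches no observable global state, so a compiler
// that honors the attribute may merge repeated calls with the same argument.
// (Hypothetical function, for illustration only.)
ABSL_ATTRIBUTE_PURE_FUNCTION inline int BitParity(unsigned x) {
  int parity = 0;
  while (x != 0) {
    parity ^= static_cast<int>(x & 1u);
    x >>= 1;
  }
  return parity;
}
```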
+// ABSL_ATTRIBUTE_CONST_FUNCTION
+//
+// ABSL_ATTRIBUTE_CONST_FUNCTION is used to annotate declarations of "const"
+// functions. A const function is similar to a pure function, with one
+// exception: pure functions may return values that depend on a non-volatile
+// object that isn't provided as a function argument, while a const function
+// is guaranteed to return the same result given the same arguments.
+//
+// Example:
+//
+//  ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Milliseconds(Duration d);
+#if defined(_MSC_VER) && !defined(__clang__)
+// Put the MSVC case first, since MSVC seems to parse `const` as a C++ keyword.
+#define ABSL_ATTRIBUTE_CONST_FUNCTION ABSL_ATTRIBUTE_PURE_FUNCTION
+#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::const)
+#define ABSL_ATTRIBUTE_CONST_FUNCTION [[gnu::const]]
+#elif ABSL_HAVE_ATTRIBUTE(const)
+#define ABSL_ATTRIBUTE_CONST_FUNCTION __attribute__((const))
+#else
+// Since const functions are more restrictive than pure functions, we'll fall
+// back to a pure function if the const attribute is not handled.
+#define ABSL_ATTRIBUTE_CONST_FUNCTION ABSL_ATTRIBUTE_PURE_FUNCTION
+#endif
 
 // ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function
 // parameter or implicit object parameter is retained by the return value of the
@@ -800,15 +843,11 @@
 // See also the upstream documentation:
 // https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
 //
-#if ABSL_HAVE_CPP_ATTRIBUTE(clang::trivial_abi)
-#define ABSL_ATTRIBUTE_TRIVIAL_ABI [[clang::trivial_abi]]
-#define ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI 1
-#elif ABSL_HAVE_ATTRIBUTE(trivial_abi)
-#define ABSL_ATTRIBUTE_TRIVIAL_ABI __attribute__((trivial_abi))
-#define ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI 1
-#else
+// b/321691395 - This is currently disabled in open-source builds since
+// compiler support differs. If system libraries compiled with GCC are mixed
+// with libraries compiled with Clang, types will have different ideas about
+// their ABI, leading to hard to debug crashes.
 #define ABSL_ATTRIBUTE_TRIVIAL_ABI
-#endif
 
 // ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
 //
diff --git a/absl/base/call_once.h b/absl/base/call_once.h
index 08436ba..7b0e69c 100644
--- a/absl/base/call_once.h
+++ b/absl/base/call_once.h
@@ -37,6 +37,7 @@
 #include "absl/base/internal/scheduling_mode.h"
 #include "absl/base/internal/spinlock_wait.h"
 #include "absl/base/macros.h"
+#include "absl/base/nullability.h"
 #include "absl/base/optimization.h"
 #include "absl/base/port.h"
 
@@ -46,7 +47,8 @@
 ABSL_NAMESPACE_BEGIN
 
 class once_flag;
 
 namespace base_internal {
-std::atomic<uint32_t>* ControlWord(absl::once_flag* flag);
+absl::Nonnull<std::atomic<uint32_t>*> ControlWord(
+    absl::Nonnull<absl::once_flag*> flag);
 }  // namespace base_internal
 
 // call_once()
@@ -89,7 +91,8 @@ class once_flag {
   once_flag& operator=(const once_flag&) = delete;
 
  private:
-  friend std::atomic<uint32_t>* base_internal::ControlWord(once_flag* flag);
+  friend absl::Nonnull<std::atomic<uint32_t>*> base_internal::ControlWord(
+      absl::Nonnull<once_flag*> flag);
   std::atomic<uint32_t> control_;
 };
 
@@ -103,7 +106,8 @@ namespace base_internal {
 // Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
 // initialize entities used by the scheduler implementation.
 template <typename Callable, typename... Args>
-void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args);
+void LowLevelCallOnce(absl::Nonnull<absl::once_flag*> flag, Callable&& fn,
+                      Args&&... args);
 
 // Disables scheduling while on stack when scheduling mode is non-cooperative.
 // No effect for cooperative scheduling modes.
@@ -143,10 +147,10 @@ enum {
 };
 
 template <typename Callable, typename... Args>
-ABSL_ATTRIBUTE_NOINLINE
-void CallOnceImpl(std::atomic<uint32_t>* control,
-                  base_internal::SchedulingMode scheduling_mode, Callable&& fn,
-                  Args&&... args) {
+ABSL_ATTRIBUTE_NOINLINE void CallOnceImpl(
+    absl::Nonnull<std::atomic<uint32_t>*> control,
+    base_internal::SchedulingMode scheduling_mode, Callable&& fn,
+    Args&&... args) {
 #ifndef NDEBUG
   {
     uint32_t old_control = control->load(std::memory_order_relaxed);
@@ -185,12 +189,14 @@ void CallOnceImpl(std::atomic<uint32_t>* control,
   }  // else *control is already kOnceDone
 }
 
-inline std::atomic<uint32_t>* ControlWord(once_flag* flag) {
+inline absl::Nonnull<std::atomic<uint32_t>*> ControlWord(
+    absl::Nonnull<once_flag*> flag) {
   return &flag->control_;
 }
 
 template <typename Callable, typename... Args>
-void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&...
args) { +void LowLevelCallOnce(absl::Nonnull flag, Callable&& fn, + Args&&... args) { std::atomic* once = base_internal::ControlWord(flag); uint32_t s = once->load(std::memory_order_acquire); if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) { diff --git a/absl/base/casts.h b/absl/base/casts.h index d195888..e0b11bb 100644 --- a/absl/base/casts.h +++ b/absl/base/casts.h @@ -90,7 +90,7 @@ ABSL_NAMESPACE_BEGIN // // Such implicit cast chaining may be useful within template logic. template -constexpr To implicit_cast(typename absl::internal::identity_t to) { +constexpr To implicit_cast(typename absl::internal::type_identity_t to) { return to; } diff --git a/absl/base/config.h b/absl/base/config.h index 1de7993..c9165ac 100644 --- a/absl/base/config.h +++ b/absl/base/config.h @@ -75,6 +75,12 @@ #define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus #endif +#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L +// Include library feature test macros. +#include +#endif + #if defined(__APPLE__) // Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED, // __IPHONE_8_0. @@ -111,8 +117,8 @@ // // LTS releases can be obtained from // https://github.com/abseil/abseil-cpp/releases. -#define ABSL_LTS_RELEASE_VERSION 20230802 -#define ABSL_LTS_RELEASE_PATCH_LEVEL 0 +#define ABSL_LTS_RELEASE_VERSION 20240116 +#define ABSL_LTS_RELEASE_PATCH_LEVEL 1 // Helper macro to convert a CPP variable to a string literal. #define ABSL_INTERNAL_DO_TOKEN_STR(x) #x @@ -332,8 +338,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #ifdef ABSL_HAVE_INTRINSIC_INT128 #error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set #elif defined(__SIZEOF_INT128__) -#if (defined(__clang__) && !defined(_WIN32)) || \ - (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \ +#if (defined(__clang__) && !defined(_WIN32)) || \ + (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \ (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__)) #define ABSL_HAVE_INTRINSIC_INT128 1 #elif defined(__CUDACC__) @@ -395,7 +401,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // Windows _WIN32 // NaCL __native_client__ // AsmJS __asmjs__ -// WebAssembly __wasm__ +// WebAssembly (Emscripten) __EMSCRIPTEN__ // Fuchsia __Fuchsia__ // // Note that since Android defines both __ANDROID__ and __linux__, one @@ -407,11 +413,11 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // POSIX.1-2001. 
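Back in the `casts.h` hunk above, `implicit_cast` keeps its shape under the rename: the parameter type `type_identity_t<To>` is a non-deduced context, so the destination type must always be spelled out at the call site. A short sketch:

```cpp
#include "absl/base/casts.h"

long Widen(int n) {
  // `To` cannot be deduced through type_identity_t, so it must be written
  // explicitly; the argument only has to be implicitly convertible to it.
  return absl::implicit_cast<long>(n);
}
```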
#ifdef ABSL_HAVE_MMAP #error ABSL_HAVE_MMAP cannot be directly set -#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(_AIX) || defined(__ros__) || defined(__native_client__) || \ - defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \ - defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \ - defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \ +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(_AIX) || defined(__ros__) || defined(__native_client__) || \ + defined(__asmjs__) || defined(__EMSCRIPTEN__) || defined(__Fuchsia__) || \ + defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \ + defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \ defined(__QNX__) || defined(__VXWORKS__) || defined(__hexagon__) #define ABSL_HAVE_MMAP 1 #endif @@ -484,6 +490,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // https://sourceforge.net/p/mingw-w64/mingw-w64/ci/master/tree/mingw-w64-crt/misc/alarm.c #elif defined(__EMSCRIPTEN__) // emscripten doesn't support signals +#elif defined(__wasi__) +// WASI doesn't support signals #elif defined(__Fuchsia__) // Signals don't exist on fuchsia. #elif defined(__native_client__) @@ -536,14 +544,14 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // and // https://github.com/llvm/llvm-project/commit/0bc451e7e137c4ccadcd3377250874f641ca514a // The second has the actually correct versions, thus, is what we copy here. -#if defined(__APPLE__) && \ - ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ +#if defined(__APPLE__) && \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000)) #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1 #else @@ -555,6 +563,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // Checks whether C++17 std::any is available. #ifdef ABSL_HAVE_STD_ANY #error "ABSL_HAVE_STD_ANY cannot be directly set." +#elif defined(__cpp_lib_any) && __cpp_lib_any >= 201606L +#define ABSL_HAVE_STD_ANY 1 #elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \ !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE @@ -566,7 +576,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // Checks whether C++17 std::optional is available. #ifdef ABSL_HAVE_STD_OPTIONAL #error "ABSL_HAVE_STD_OPTIONAL cannot be directly set." 
-#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ +#elif defined(__cpp_lib_optional) && __cpp_lib_optional >= 202106L +#define ABSL_HAVE_STD_OPTIONAL 1 +#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \ !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE #define ABSL_HAVE_STD_OPTIONAL 1 @@ -577,6 +589,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // Checks whether C++17 std::variant is available. #ifdef ABSL_HAVE_STD_VARIANT #error "ABSL_HAVE_STD_VARIANT cannot be directly set." +#elif defined(__cpp_lib_variant) && __cpp_lib_variant >= 201606L +#define ABSL_HAVE_STD_VARIANT 1 #elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \ !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE @@ -588,11 +602,29 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // Checks whether C++17 std::string_view is available. #ifdef ABSL_HAVE_STD_STRING_VIEW #error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set." +#elif defined(__cpp_lib_string_view) && __cpp_lib_string_view >= 201606L +#define ABSL_HAVE_STD_STRING_VIEW 1 #elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L #define ABSL_HAVE_STD_STRING_VIEW 1 #endif +// ABSL_HAVE_STD_ORDERING +// +// Checks whether C++20 std::{partial,weak,strong}_ordering are available. +// +// __cpp_lib_three_way_comparison is missing on libc++ +// (https://github.com/llvm/llvm-project/issues/73953) so treat it as defined +// when building in C++20 mode. +#ifdef ABSL_HAVE_STD_ORDERING +#error "ABSL_HAVE_STD_ORDERING cannot be directly set." +#elif (defined(__cpp_lib_three_way_comparison) && \ + __cpp_lib_three_way_comparison >= 201907L) || \ + (defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L) +#define ABSL_HAVE_STD_ORDERING 1 +#endif + // ABSL_USES_STD_ANY // // Indicates whether absl::any is an alias for std::any. @@ -655,6 +687,22 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #error options.h is misconfigured. #endif +// ABSL_USES_STD_ORDERING +// +// Indicates whether absl::{partial,weak,strong}_ordering are aliases for the +// std:: ordering types. +#if !defined(ABSL_OPTION_USE_STD_ORDERING) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_ORDERING == 0 || \ + (ABSL_OPTION_USE_STD_ORDERING == 2 && !defined(ABSL_HAVE_STD_ORDERING)) +#undef ABSL_USES_STD_ORDERING +#elif ABSL_OPTION_USE_STD_ORDERING == 1 || \ + (ABSL_OPTION_USE_STD_ORDERING == 2 && defined(ABSL_HAVE_STD_ORDERING)) +#define ABSL_USES_STD_ORDERING 1 +#else +#error options.h is misconfigured. +#endif + // In debug mode, MSVC 2017's std::variant throws a EXCEPTION_ACCESS_VIOLATION // SEH exception from emplace for variant when constructing the // struct can throw. This defeats some of variant_test and @@ -837,11 +885,30 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // RTTI support. 
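Bridging into the RTTI hunk that follows: the detection now asks Clang's `__has_feature(cxx_rtti)` first, then falls back to compiler-specific macros, and defaults to assuming RTTI on unknown compilers. A hedged sketch of guarding a `typeid` path (`ABSL_INTERNAL_HAS_RTTI` is an internal macro, used here only for illustration):

```cpp
#include <typeinfo>

#include "absl/base/config.h"

const char* TypeName(const int& v) {
#ifdef ABSL_INTERNAL_HAS_RTTI
  return typeid(v).name();  // RTTI available
#else
  return "unknown (built without RTTI)";
#endif
}
```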
#ifdef ABSL_INTERNAL_HAS_RTTI #error ABSL_INTERNAL_HAS_RTTI cannot be directly set -#elif (defined(__GNUC__) && defined(__GXX_RTTI)) || \ - (defined(_MSC_VER) && defined(_CPPRTTI)) || \ - (!defined(__GNUC__) && !defined(_MSC_VER)) +#elif ABSL_HAVE_FEATURE(cxx_rtti) #define ABSL_INTERNAL_HAS_RTTI 1 -#endif // !defined(__GNUC__) || defined(__GXX_RTTI) +#elif defined(__GNUC__) && defined(__GXX_RTTI) +#define ABSL_INTERNAL_HAS_RTTI 1 +#elif defined(_MSC_VER) && defined(_CPPRTTI) +#define ABSL_INTERNAL_HAS_RTTI 1 +#elif !defined(__GNUC__) && !defined(_MSC_VER) +// Unknown compiler, default to RTTI +#define ABSL_INTERNAL_HAS_RTTI 1 +#endif + +// `ABSL_INTERNAL_HAS_CXA_DEMANGLE` determines whether `abi::__cxa_demangle` is +// available. +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE +#error ABSL_INTERNAL_HAS_CXA_DEMANGLE cannot be directly set +#elif defined(OS_ANDROID) && (defined(__i386__) || defined(__x86_64__)) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 0 +#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && \ + (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && \ + !defined(__mips__) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1 +#elif defined(__clang__) && !defined(_MSC_VER) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1 +#endif // ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support. // See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of @@ -928,8 +995,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #if __EMSCRIPTEN_tiny__ >= 1000 #error __EMSCRIPTEN_tiny__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION #endif -#define ABSL_INTERNAL_EMSCRIPTEN_VERSION \ - ((__EMSCRIPTEN_major__)*1000000 + (__EMSCRIPTEN_minor__)*1000 + \ +#define ABSL_INTERNAL_EMSCRIPTEN_VERSION \ + ((__EMSCRIPTEN_major__) * 1000000 + (__EMSCRIPTEN_minor__) * 1000 + \ (__EMSCRIPTEN_tiny__)) #endif #endif diff --git a/absl/base/internal/endian.h b/absl/base/internal/endian.h index 50747d7..943f3d9 100644 --- a/absl/base/internal/endian.h +++ b/absl/base/internal/endian.h @@ -22,6 +22,7 @@ #include "absl/base/casts.h" #include "absl/base/config.h" #include "absl/base/internal/unaligned_access.h" +#include "absl/base/nullability.h" #include "absl/base/port.h" namespace absl { @@ -160,27 +161,27 @@ inline int64_t ToHost(int64_t x) { } // Functions to do unaligned loads and stores in little-endian order. -inline uint16_t Load16(const void *p) { +inline uint16_t Load16(absl::Nonnull p) { return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); } -inline void Store16(void *p, uint16_t v) { +inline void Store16(absl::Nonnull p, uint16_t v) { ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); } -inline uint32_t Load32(const void *p) { +inline uint32_t Load32(absl::Nonnull p) { return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); } -inline void Store32(void *p, uint32_t v) { +inline void Store32(absl::Nonnull p, uint32_t v) { ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); } -inline uint64_t Load64(const void *p) { +inline uint64_t Load64(absl::Nonnull p) { return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); } -inline void Store64(void *p, uint64_t v) { +inline void Store64(absl::Nonnull p, uint64_t v) { ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); } @@ -250,27 +251,27 @@ inline int64_t ToHost(int64_t x) { } // Functions to do unaligned loads and stores in big-endian order. 
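The big-endian loads and stores below receive the same `absl::Nonnull` treatment as the little-endian ones above. The annotation is an alias with no runtime or ABI cost; a minimal sketch:

```cpp
#include "absl/base/nullability.h"

// absl::Nonnull<const int*> is an alias of const int*: the "must not be
// null" contract is documentation plus static-analysis metadata, with no
// change to the generated code.
int Deref(absl::Nonnull<const int*> p) { return *p; }
```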
-inline uint16_t Load16(const void *p) {
+inline uint16_t Load16(absl::Nonnull<const void *> p) {
   return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
 }
 
-inline void Store16(void *p, uint16_t v) {
+inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
   ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
 }
 
-inline uint32_t Load32(const void *p) {
+inline uint32_t Load32(absl::Nonnull<const void *> p) {
   return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
 }
 
-inline void Store32(void *p, uint32_t v) {
+inline void Store32(absl::Nonnull<void *> p, uint32_t v) {
   ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
 }
 
-inline uint64_t Load64(const void *p) {
+inline uint64_t Load64(absl::Nonnull<const void *> p) {
   return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
 }
 
-inline void Store64(void *p, uint64_t v) {
+inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
   ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
 }
 
diff --git a/absl/base/internal/identity.h b/absl/base/internal/identity.h
index a3154ed..365207b 100644
--- a/absl/base/internal/identity.h
+++ b/absl/base/internal/identity.h
@@ -22,13 +22,15 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace internal {
 
+// This is a back-fill of C++20's `std::type_identity`.
 template <typename T>
-struct identity {
+struct type_identity {
   typedef T type;
 };
 
+// This is a back-fill of C++20's `std::type_identity_t`.
 template <typename T>
-using identity_t = typename identity<T>::type;
+using type_identity_t = typename type_identity<T>::type;
 
 }  // namespace internal
 ABSL_NAMESPACE_END
diff --git a/absl/base/internal/inline_variable.h b/absl/base/internal/inline_variable.h
index df933fa..09daf0f 100644
--- a/absl/base/internal/inline_variable.h
+++ b/absl/base/internal/inline_variable.h
@@ -63,12 +63,12 @@
 // Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
 //
 // Note:
-//   identity_t is used here so that the const and name are in the
+//   type_identity_t is used here so that the const and name are in the
 //   appropriate place for pointer types, reference types, function pointer
 //   types, etc..
 #if defined(__clang__)
 #define ABSL_INTERNAL_EXTERN_DECL(type, name) \
-  extern const ::absl::internal::identity_t<type> name;
+  extern const ::absl::internal::type_identity_t<type> name;
 #else  // Otherwise, just define the macro to do nothing.
 #define ABSL_INTERNAL_EXTERN_DECL(type, name)
 #endif  // defined(__clang__)
 
@@ -76,30 +76,31 @@
 // See above comment at top of file for details.
 #define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
   ABSL_INTERNAL_EXTERN_DECL(type, name)                  \
-  inline constexpr ::absl::internal::identity_t<type> name = init
+  inline constexpr ::absl::internal::type_identity_t<type> name = init
 
 #else
 
 // See above comment at top of file for details.
 //
 // Note:
-//   identity_t is used here so that the const and name are in the
+//   type_identity_t is used here so that the const and name are in the
 //   appropriate place for pointer types, reference types, function pointer
 //   types, etc..
-#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \ - template <class /*AbslInternalDummy*/ = void> \ - struct AbslInternalInlineVariableHolder##name { \ - static constexpr ::absl::internal::identity_t<var_type> kInstance = init; \ - }; \ - \ - template <class AbslInternalDummy> \ - constexpr ::absl::internal::identity_t<var_type> \ - AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \ - \ - static constexpr const ::absl::internal::identity_t<var_type>& \ - name = /* NOLINT */ \ - AbslInternalInlineVariableHolder##name<>::kInstance; \ - static_assert(sizeof(void (*)(decltype(name))) != 0, \ +#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \ + template <class /*AbslInternalDummy*/ = void> \ + struct AbslInternalInlineVariableHolder##name { \ + static constexpr ::absl::internal::type_identity_t<var_type> kInstance = \ + init; \ + }; \ + \ + template <class AbslInternalDummy> \ + constexpr ::absl::internal::type_identity_t<var_type> \ + AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \ + \ + static constexpr const ::absl::internal::type_identity_t<var_type>& \ + name = /* NOLINT */ \ + AbslInternalInlineVariableHolder##name<>::kInstance; \ + static_assert(sizeof(void (*)(decltype(name))) != 0, \ "Silence unused variable warnings.") #endif // __cpp_inline_variables diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc index 6d2cfea..a563f7b 100644 --- a/absl/base/internal/low_level_alloc.cc +++ b/absl/base/internal/low_level_alloc.cc @@ -329,7 +329,7 @@ size_t GetPageSize() { SYSTEM_INFO system_info; GetSystemInfo(&system_info); return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity); -#elif defined(__wasm__) || defined(__asmjs__) +#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__) return getpagesize(); #else return static_cast<size_t>(sysconf(_SC_PAGESIZE)); diff --git a/absl/base/internal/prefetch.h b/absl/base/internal/prefetch.h deleted file mode 100644 index aecfd87..0000000 --- a/absl/base/internal/prefetch.h +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2022 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// TODO(b/265984188): remove all uses and delete this header. - -#ifndef ABSL_BASE_INTERNAL_PREFETCH_H_ -#define ABSL_BASE_INTERNAL_PREFETCH_H_ - -#include "absl/base/attributes.h" -#include "absl/base/config.h" -#include "absl/base/prefetch.h" - -#ifdef __SSE__ -#include <xmmintrin.h> -#endif - -#if defined(_MSC_VER) && defined(ABSL_INTERNAL_HAVE_SSE) -#include <intrin.h> -#pragma intrinsic(_mm_prefetch) -#endif - -// Compatibility wrappers around __builtin_prefetch, to prefetch data -// for read if supported by the toolchain. - -// Move data into the cache before it is read, or "prefetch" it. -// -// The value of `addr` is the address of the memory to prefetch. If -// the target and compiler support it, data prefetch instructions are -// generated. If the prefetch is done some time before the memory is -// read, it may be in the cache by the time the read occurs.
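Since this internal header is being deleted in favor of absl/base/prefetch.h, a migration sketch for the common cases (the wrapper function is hypothetical; the absl:: replacements are the ones named in the deprecation notes below):

    #include "absl/base/prefetch.h"

    void WarmUp(const void* p) {
      absl::PrefetchToLocalCache(p);     // replaces base_internal::PrefetchT0
      absl::PrefetchToLocalCacheNta(p);  // replaces base_internal::PrefetchNta
    }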
-// -// The function names specify the temporal locality heuristic applied, -// using the names of Intel prefetch instructions: -// -// T0 - high degree of temporal locality; data should be left in as -// many levels of the cache possible -// T1 - moderate degree of temporal locality -// T2 - low degree of temporal locality -// Nta - no temporal locality, data need not be left in the cache -// after the read -// -// Incorrect or gratuitous use of these functions can degrade -// performance, so use them only when representative benchmarks show -// an improvement. -// -// Example usage: -// -// absl::base_internal::PrefetchT0(addr); -// -// Currently, the different prefetch calls behave on some Intel -// architectures as follows: -// -// SNB..SKL SKX -// PrefetchT0() L1/L2/L3 L1/L2 -// PrefetchT1() L2/L3 L2 -// PrefetchT2() L2/L3 L2 -// PrefetchNta() L1/--/L3 L1* -// -// * On SKX PrefetchNta() will bring the line into L1 but will evict -// from L3 cache. This might result in surprising behavior. -// -// SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon. -// -namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { - -ABSL_DEPRECATED("Use absl::PrefetchToLocalCache() instead") -inline void PrefetchT0(const void* address) { - absl::PrefetchToLocalCache(address); -} - -ABSL_DEPRECATED("Use absl::PrefetchToLocalCache() instead") -inline void PrefetchNta(const void* address) { - absl::PrefetchToLocalCacheNta(address); -} - -ABSL_DEPRECATED("Use __builtin_prefetch() for advanced prefetch logic instead") -void PrefetchT1(const void* addr); - -ABSL_DEPRECATED("Use __builtin_prefetch() for advanced prefetch logic instead") -void PrefetchT2(const void* addr); - -// Implementation details follow. - -#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) - -#define ABSL_INTERNAL_HAVE_PREFETCH 1 - -// See __builtin_prefetch: -// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html. -// -// These functions speculatively load for read only. This is -// safe for all currently supported platforms. However, prefetch for -// store may have problems depending on the target platform. -// -inline void PrefetchT1(const void* addr) { - // Note: this uses prefetcht1 on Intel. - __builtin_prefetch(addr, 0, 2); -} -inline void PrefetchT2(const void* addr) { - // Note: this uses prefetcht2 on Intel. - __builtin_prefetch(addr, 0, 1); -} - -#elif defined(ABSL_INTERNAL_HAVE_SSE) - -#define ABSL_INTERNAL_HAVE_PREFETCH 1 - -inline void PrefetchT1(const void* addr) { - _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T1); -} -inline void PrefetchT2(const void* addr) { - _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T2); -} - -#else -inline void PrefetchT1(const void*) {} -inline void PrefetchT2(const void*) {} -#endif - -} // namespace base_internal -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_BASE_INTERNAL_PREFETCH_H_ diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc index 4c922cc..d32b40a 100644 --- a/absl/base/internal/raw_logging.cc +++ b/absl/base/internal/raw_logging.cc @@ -42,8 +42,9 @@ // This preprocessor token is also defined in raw_io.cc. If you need to copy // this, consider moving both to config.h instead.
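For context, the consumer of the platform gates below is the ABSL_RAW_LOG macro, which formats with printf semantics and works in low-level code where the stream-based LOG cannot; a usage sketch (the function and condition are illustrative):

    #include "absl/base/internal/raw_logging.h"

    void InitOrDie(bool ok) {
      if (!ok) {
        ABSL_RAW_LOG(FATAL, "initialization failed (ok=%d)", ok ? 1 : 0);
      }
    }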
#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(__Fuchsia__) || defined(__native_client__) || \ - defined(__OpenBSD__) || defined(__EMSCRIPTEN__) || defined(__ASYLO__) + defined(__hexagon__) || defined(__Fuchsia__) || \ + defined(__native_client__) || defined(__OpenBSD__) || \ + defined(__EMSCRIPTEN__) || defined(__ASYLO__) #include <unistd.h> @@ -56,8 +57,7 @@ // ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall // syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len); // for low level operations that want to avoid libc. -#if (defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)) && \ - !defined(__ANDROID__) +#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__) #include <sys/syscall.h> #define ABSL_HAVE_SYSCALL_WRITE 1 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 @@ -93,8 +93,7 @@ constexpr char kTruncated[] = " ... (message truncated)\n"; bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0); bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) { - if (*size < 0) - return false; + if (*size < 0) return false; int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap); bool result = true; if (n < 0 || n > *size) { @@ -122,8 +121,7 @@ constexpr int kLogBufSize = 3000; bool DoRawLog(char** buf, int* size, const char* format, ...) ABSL_PRINTF_ATTRIBUTE(3, 4); bool DoRawLog(char** buf, int* size, const char* format, ...) { - if (*size < 0) - return false; + if (*size < 0) return false; va_list ap; va_start(ap, format); int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap); @@ -242,8 +240,8 @@ void AsyncSignalSafeWriteError(const char* s, size_t len) { _write(/* stderr */ 2, s, static_cast<unsigned>(len)); #else // stderr logging unsupported on this platform - (void) s; - (void) len; + (void)s; + (void)len; #endif } @@ -258,7 +256,7 @@ void RawLog(absl::LogSeverity severity, const char* file, int line, bool RawLoggingFullySupported() { #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED return true; -#else // !ABSL_LOW_LEVEL_WRITE_SUPPORTED +#else // !ABSL_LOW_LEVEL_WRITE_SUPPORTED return false; #endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED } diff --git a/absl/base/internal/raw_logging.h b/absl/base/internal/raw_logging.h index b79550b..d7cfbc5 100644 --- a/absl/base/internal/raw_logging.h +++ b/absl/base/internal/raw_logging.h @@ -108,6 +108,7 @@ #define ABSL_RAW_LOG_INTERNAL_WARNING ::absl::LogSeverity::kWarning #define ABSL_RAW_LOG_INTERNAL_ERROR ::absl::LogSeverity::kError #define ABSL_RAW_LOG_INTERNAL_FATAL ::absl::LogSeverity::kFatal +#define ABSL_RAW_LOG_INTERNAL_DFATAL ::absl::kLogDebugFatal #define ABSL_RAW_LOG_INTERNAL_LEVEL(severity) \ ::absl::NormalizeLogSeverity(severity) @@ -115,6 +116,7 @@ #define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_WARNING #define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_ERROR #define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_FATAL ABSL_UNREACHABLE() +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_DFATAL #define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_LEVEL(severity) namespace absl { diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h index 09ba582..2929cd6 100644 --- a/absl/base/internal/spinlock.h +++ b/absl/base/internal/spinlock.h @@ -19,10 +19,10 @@ // - for use by Abseil internal code that Mutex itself depends on // - for async signal safety (see below) -// SpinLock is async signal safe.
If a spinlock is used within a signal -// handler, all code that acquires the lock must ensure that the signal cannot -// arrive while they are holding the lock. Typically, this is done by blocking -// the signal. +// SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is async +// signal safe. If a spinlock is used within a signal handler, all code that +// acquires the lock must ensure that the signal cannot arrive while they are +// holding the lock. Typically, this is done by blocking the signal. // // Threads waiting on a SpinLock may be woken in an arbitrary order. @@ -41,6 +41,14 @@ #include "absl/base/internal/tsan_mutex_interface.h" #include "absl/base/thread_annotations.h" +namespace tcmalloc { +namespace tcmalloc_internal { + +class AllocationGuardSpinLockHolder; + +} // namespace tcmalloc_internal +} // namespace tcmalloc + namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { @@ -137,6 +145,7 @@ class ABSL_LOCKABLE SpinLock { // Provide access to protected method above. Use for testing only. friend struct SpinLockTest; + friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder; private: // lockword_ is used to store the following: @@ -171,6 +180,10 @@ class ABSL_LOCKABLE SpinLock { return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; } + bool IsCooperative() const { + return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative; + } + uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles); void SlowLock() ABSL_ATTRIBUTE_COLD; void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD; diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc index 8bcc4fa..79eaba3 100644 --- a/absl/base/internal/sysinfo.cc +++ b/absl/base/internal/sysinfo.cc @@ -34,6 +34,14 @@ #include <sys/sysctl.h> #endif +#ifdef __FreeBSD__ +#include <pthread_np.h> +#endif + +#ifdef __NetBSD__ +#include <lwp.h> +#endif + #if defined(__myriad2__) #include <rtems.h> #endif @@ -432,6 +440,18 @@ pid_t GetTID() { return static_cast<pid_t>(tid); } +#elif defined(__FreeBSD__) + +pid_t GetTID() { return static_cast<pid_t>(pthread_getthreadid_np()); } + +#elif defined(__OpenBSD__) + +pid_t GetTID() { return getthrid(); } + +#elif defined(__NetBSD__) + +pid_t GetTID() { return static_cast<pid_t>(_lwp_self()); } + #elif defined(__native_client__) pid_t GetTID() { diff --git a/absl/base/internal/thread_annotations.h b/absl/base/internal/thread_annotations.h deleted file mode 100644 index 8c5c67e..0000000 --- a/absl/base/internal/thread_annotations.h +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2019 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ----------------------------------------------------------------------------- -// File: thread_annotations.h -// ----------------------------------------------------------------------------- -// -// WARNING: This is a backwards compatible header and it will be removed after -// the migration to prefixed thread annotations is finished; please include -// "absl/base/thread_annotations.h".
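With this compatibility header deleted, any code still on the legacy spellings has to move to the ABSL_-prefixed macros from absl/base/thread_annotations.h; a migration sketch (the class is hypothetical):

    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class Counter {
     public:
      // LOCKS_EXCLUDED(mu_) becomes ABSL_LOCKS_EXCLUDED(mu_).
      void Inc() ABSL_LOCKS_EXCLUDED(mu_) {
        absl::MutexLock l(&mu_);
        ++n_;
      }

     private:
      absl::Mutex mu_;
      int n_ ABSL_GUARDED_BY(mu_) = 0;  // GUARDED_BY becomes ABSL_GUARDED_BY.
    };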
-// -// This header file contains macro definitions for thread safety annotations -// that allow developers to document the locking policies of multi-threaded -// code. The annotations can also help program analysis tools to identify -// potential thread safety issues. -// -// These annotations are implemented using compiler attributes. Using the macros -// defined here instead of raw attributes allow for portability and future -// compatibility. -// -// When referring to mutexes in the arguments of the attributes, you should -// use variable names or more complex expressions (e.g. my_object->mutex_) -// that evaluate to a concrete mutex object whenever possible. If the mutex -// you want to refer to is not in scope, you may use a member pointer -// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. - -#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ -#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ - -// ABSL_LEGACY_THREAD_ANNOTATIONS is a *temporary* compatibility macro that can -// be defined on the compile command-line to restore the legacy spellings of the -// thread annotations macros/functions. The macros in this file are available -// under ABSL_ prefixed spellings in absl/base/thread_annotations.h. This macro -// and the legacy spellings will be removed in the future. -#ifdef ABSL_LEGACY_THREAD_ANNOTATIONS - -#if defined(__clang__) -#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) -#else -#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op -#endif - -// GUARDED_BY() -// -// Documents if a shared field or global variable needs to be protected by a -// mutex. GUARDED_BY() allows the user to specify a particular mutex that -// should be held when accessing the annotated variable. -// -// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to -// local variables, a local variable and its associated mutex can often be -// combined into a small class or struct, thereby allowing the annotation. -// -// Example: -// -// class Foo { -// Mutex mu_; -// int p1_ GUARDED_BY(mu_); -// ... -// }; -#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) - -// PT_GUARDED_BY() -// -// Documents if the memory location pointed to by a pointer should be guarded -// by a mutex when dereferencing the pointer. -// -// Example: -// class Foo { -// Mutex mu_; -// int *p1_ PT_GUARDED_BY(mu_); -// ... -// }; -// -// Note that a pointer variable to a shared memory location could itself be a -// shared variable. -// -// Example: -// -// // `q_`, guarded by `mu1_`, points to a shared memory location that is -// // guarded by `mu2_`: -// int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_); -#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) - -// ACQUIRED_AFTER() / ACQUIRED_BEFORE() -// -// Documents the acquisition order between locks that can be held -// simultaneously by a thread. For any two locks that need to be annotated -// to establish an acquisition order, only one of them needs the annotation. -// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER -// and ACQUIRED_BEFORE.) -// -// As with GUARDED_BY, this is only applicable to mutexes that are shared -// fields or global variables. -// -// Example: -// -// Mutex m1_; -// Mutex m2_ ACQUIRED_AFTER(m1_); -#define ACQUIRED_AFTER(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) - -#define ACQUIRED_BEFORE(...) 
\ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) - -// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED() -// -// Documents a function that expects a mutex to be held prior to entry. -// The mutex is expected to be held both on entry to, and exit from, the -// function. -// -// An exclusive lock allows read-write access to the guarded data member(s), and -// only one thread can acquire a lock exclusively at any one time. A shared lock -// allows read-only access, and any number of threads can acquire a shared lock -// concurrently. -// -// Generally, non-const methods should be annotated with -// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with -// SHARED_LOCKS_REQUIRED. -// -// Example: -// -// Mutex mu1, mu2; -// int a GUARDED_BY(mu1); -// int b GUARDED_BY(mu2); -// -// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } -// void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } -#define EXCLUSIVE_LOCKS_REQUIRED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) - -#define SHARED_LOCKS_REQUIRED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) - -// LOCKS_EXCLUDED() -// -// Documents the locks acquired in the body of the function. These locks -// cannot be held when calling this function (as Abseil's `Mutex` locks are -// non-reentrant). -#define LOCKS_EXCLUDED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) - -// LOCK_RETURNED() -// -// Documents a function that returns a mutex without acquiring it. For example, -// a public getter method that returns a pointer to a private mutex should -// be annotated with LOCK_RETURNED. -#define LOCK_RETURNED(x) \ - THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) - -// LOCKABLE -// -// Documents if a class/type is a lockable type (such as the `Mutex` class). -#define LOCKABLE \ - THREAD_ANNOTATION_ATTRIBUTE__(lockable) - -// SCOPED_LOCKABLE -// -// Documents if a class does RAII locking (such as the `MutexLock` class). -// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is -// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no -// arguments; the analysis will assume that the destructor unlocks whatever the -// constructor locked. -#define SCOPED_LOCKABLE \ - THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) - -// EXCLUSIVE_LOCK_FUNCTION() -// -// Documents functions that acquire a lock in the body of a function, and do -// not release it. -#define EXCLUSIVE_LOCK_FUNCTION(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) - -// SHARED_LOCK_FUNCTION() -// -// Documents functions that acquire a shared (reader) lock in the body of a -// function, and do not release it. -#define SHARED_LOCK_FUNCTION(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) - -// UNLOCK_FUNCTION() -// -// Documents functions that expect a lock to be held on entry to the function, -// and release it in the body of the function. -#define UNLOCK_FUNCTION(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) - -// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION() -// -// Documents functions that try to acquire a lock, and return success or failure -// (or a non-boolean value that can be interpreted as a boolean). -// The first argument should be `true` for functions that return `true` on -// success, or `false` for functions that return `false` on success. The second -// argument specifies the mutex that is locked on success. 
If unspecified, this -// mutex is assumed to be `this`. -#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) - -#define SHARED_TRYLOCK_FUNCTION(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) - -// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK() -// -// Documents functions that dynamically check to see if a lock is held, and fail -// if it is not held. -#define ASSERT_EXCLUSIVE_LOCK(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__)) - -#define ASSERT_SHARED_LOCK(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__)) - -// NO_THREAD_SAFETY_ANALYSIS -// -// Turns off thread safety checking within the body of a particular function. -// This annotation is used to mark functions that are known to be correct, but -// the locking behavior is more complicated than the analyzer can handle. -#define NO_THREAD_SAFETY_ANALYSIS \ - THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) - -//------------------------------------------------------------------------------ -// Tool-Supplied Annotations -//------------------------------------------------------------------------------ - -// TS_UNCHECKED should be placed around lock expressions that are not valid -// C++ syntax, but which are present for documentation purposes. These -// annotations will be ignored by the analysis. -#define TS_UNCHECKED(x) "" - -// TS_FIXME is used to mark lock expressions that are not valid C++ syntax. -// It is used by automated tools to mark and disable invalid expressions. -// The annotation should either be fixed, or changed to TS_UNCHECKED. -#define TS_FIXME(x) "" - -// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of -// a particular function. However, this attribute is used to mark functions -// that are incorrect and need to be fixed. It is used by automated tools to -// avoid breaking the build when the analysis is updated. -// Code owners are expected to eventually fix the routine. -#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS - -// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY -// annotation that needs to be fixed, because it is producing thread safety -// warning. It disables the GUARDED_BY. -#define GUARDED_BY_FIXME(x) - -// Disables warnings for a single read operation. This can be used to avoid -// warnings when it is known that the read is not actually involved in a race, -// but the compiler cannot confirm that. -#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x) - - -namespace thread_safety_analysis { - -// Takes a reference to a guarded data member, and returns an unguarded -// reference. -template <typename T> -inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS { - return v; -} - -template <typename T> -inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS { - return v; -} - -} // namespace thread_safety_analysis - -#endif // defined(ABSL_LEGACY_THREAD_ANNOTATIONS) - -#endif // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ diff --git a/absl/base/internal/thread_identity.cc b/absl/base/internal/thread_identity.cc index 252443e..0471a25 100644 --- a/absl/base/internal/thread_identity.cc +++ b/absl/base/internal/thread_identity.cc @@ -16,8 +16,12 @@ #if !defined(_WIN32) || defined(__MINGW32__) #include <pthread.h> +#ifndef __wasi__ +// WASI does not provide this header, either way we disable use +// of signals with it below.
#include <signal.h> #endif +#endif #include <atomic> #include <cassert> @@ -80,10 +84,12 @@ void SetCurrentThreadIdentity(ThreadIdentity* identity, absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey, reclaimer); -#if defined(__EMSCRIPTEN__) || defined(__MINGW32__) || defined(__hexagon__) - // Emscripten and MinGW pthread implementations does not support signals. - // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html - // for more information. +#if defined(__wasi__) || defined(__EMSCRIPTEN__) || defined(__MINGW32__) || \ + defined(__hexagon__) + // Emscripten, WASI and MinGW pthread implementations do not support + // signals. See + // https://kripken.github.io/emscripten-site/docs/porting/pthreads.html for + // more information. pthread_setspecific(thread_identity_pthread_key, reinterpret_cast<void*>(identity)); #else diff --git a/absl/base/internal/unaligned_access.h b/absl/base/internal/unaligned_access.h index 093dd9b..4fea457 100644 --- a/absl/base/internal/unaligned_access.h +++ b/absl/base/internal/unaligned_access.h @@ -23,6 +23,7 @@ #include "absl/base/attributes.h" #include "absl/base/config.h" +#include "absl/base/nullability.h" // unaligned APIs @@ -35,29 +36,35 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { -inline uint16_t UnalignedLoad16(const void *p) { +inline uint16_t UnalignedLoad16(absl::Nonnull<const void *> p) { uint16_t t; memcpy(&t, p, sizeof t); return t; } -inline uint32_t UnalignedLoad32(const void *p) { +inline uint32_t UnalignedLoad32(absl::Nonnull<const void *> p) { uint32_t t; memcpy(&t, p, sizeof t); return t; } -inline uint64_t UnalignedLoad64(const void *p) { +inline uint64_t UnalignedLoad64(absl::Nonnull<const void *> p) { uint64_t t; memcpy(&t, p, sizeof t); return t; } -inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); } +inline void UnalignedStore16(absl::Nonnull<void *> p, uint16_t v) { + memcpy(p, &v, sizeof v); +} -inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); } +inline void UnalignedStore32(absl::Nonnull<void *> p, uint32_t v) { + memcpy(p, &v, sizeof v); +} -inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } +inline void UnalignedStore64(absl::Nonnull<void *> p, uint64_t v) { + memcpy(p, &v, sizeof v); +} } // namespace base_internal ABSL_NAMESPACE_END diff --git a/absl/base/log_severity.cc b/absl/base/log_severity.cc index 60a8fc1..8e7bbbc 100644 --- a/absl/base/log_severity.cc +++ b/absl/base/log_severity.cc @@ -17,6 +17,7 @@ #include <ostream> #include "absl/base/attributes.h" +#include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN diff --git a/absl/base/log_severity.h b/absl/base/log_severity.h index 8bdca38..de9702a 100644 --- a/absl/base/log_severity.h +++ b/absl/base/log_severity.h @@ -64,6 +64,8 @@ ABSL_NAMESPACE_BEGIN // --my_log_level=info // --my_log_level=0 // +// `DFATAL` and `kLogDebugFatal` are similarly accepted. +// // Unparsing a flag produces the same result as `absl::LogSeverityName()` for // the standard levels and a base-ten integer otherwise. enum class LogSeverity : int { @@ -82,18 +84,28 @@ constexpr std::array<absl::LogSeverity, 4> LogSeverities() { absl::LogSeverity::kError, absl::LogSeverity::kFatal}}; } +// `absl::kLogDebugFatal` equals `absl::LogSeverity::kFatal` in debug builds +// (i.e. when `NDEBUG` is not defined) and `absl::LogSeverity::kError` +// otherwise. Avoid ODR-using this variable as it has internal linkage and thus +// distinct storage in different TUs.
+#ifdef NDEBUG +static constexpr absl::LogSeverity kLogDebugFatal = absl::LogSeverity::kError; +#else +static constexpr absl::LogSeverity kLogDebugFatal = absl::LogSeverity::kFatal; +#endif + // LogSeverityName() // // Returns the all-caps string representation (e.g. "INFO") of the specified // severity level if it is one of the standard levels and "UNKNOWN" otherwise. constexpr const char* LogSeverityName(absl::LogSeverity s) { - return s == absl::LogSeverity::kInfo - ? "INFO" - : s == absl::LogSeverity::kWarning - ? "WARNING" - : s == absl::LogSeverity::kError - ? "ERROR" - : s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN"; + switch (s) { + case absl::LogSeverity::kInfo: return "INFO"; + case absl::LogSeverity::kWarning: return "WARNING"; + case absl::LogSeverity::kError: return "ERROR"; + case absl::LogSeverity::kFatal: return "FATAL"; + } + return "UNKNOWN"; } // NormalizeLogSeverity() @@ -101,9 +113,10 @@ constexpr const char* LogSeverityName(absl::LogSeverity s) { // Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal` // normalize to `kError` (**NOT** `kFatal`). constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) { - return s < absl::LogSeverity::kInfo - ? absl::LogSeverity::kInfo - : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s; + absl::LogSeverity n = s; + if (n < absl::LogSeverity::kInfo) n = absl::LogSeverity::kInfo; + if (n > absl::LogSeverity::kFatal) n = absl::LogSeverity::kError; + return n; } constexpr absl::LogSeverity NormalizeLogSeverity(int s) { return absl::NormalizeLogSeverity(static_cast<absl::LogSeverity>(s)); diff --git a/absl/base/no_destructor.h b/absl/base/no_destructor.h new file mode 100644 index 0000000..d4b16a6 --- /dev/null +++ b/absl/base/no_destructor.h @@ -0,0 +1,217 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: no_destructor.h +// ----------------------------------------------------------------------------- +// +// This header file defines the absl::NoDestructor<T> wrapper for defining a +// static type that does not need to be destructed upon program exit. Instead, +// such an object survives during program exit (and can be safely accessed at +// any time). +// +// Objects of such type, if constructed safely and under the right conditions, +// provide two main benefits over other alternatives: +// +// * Global objects not normally allowed due to concerns of destruction order +// (i.e. no "complex globals") can be safely allowed, provided that such +// objects can be constant initialized. +// * Function scope static objects can be optimized to avoid heap allocation, +// pointer chasing, and allow lazy construction. +// +// See below for complete details.
+ + +#ifndef ABSL_BASE_NO_DESTRUCTOR_H_ +#define ABSL_BASE_NO_DESTRUCTOR_H_ + +#include <new> +#include <type_traits> +#include <utility> + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::NoDestructor<T> +// +// NoDestructor<T> is a wrapper around an object of type T that behaves as an +// object of type T but never calls T's destructor. NoDestructor<T> makes it +// safer and/or more efficient to use such objects in static storage contexts: +// as global or function scope static variables. +// +// An instance of absl::NoDestructor<T> has similar type semantics to an +// instance of T: +// +// * Constructs in the same manner as an object of type T through perfect +// forwarding. +// * Provides pointer/reference semantic access to the object of type T via +// `->`, `*`, and `get()`. +// (Note that `const NoDestructor<T>` works like a pointer to const `T`.) +// +// An object of type NoDestructor<T> should be defined in static storage: +// as either a global static object, or as a function scope static variable. +// +// Additionally, NoDestructor<T> provides the following benefits: +// +// * Never calls T's destructor for the object +// * If the object is a function-local static variable, the type can be +// lazily constructed. +// +// An object of type NoDestructor<T> is "trivially destructible" in the notion +// that its destructor is never run. Provided that an object of this type can be +// safely initialized and does not need to be cleaned up on program shutdown, +// NoDestructor<T> allows you to define global static variables, since Google's +// C++ style guide ban on such objects doesn't apply to objects that are +// trivially destructible. +// +// Usage as Global Static Variables +// +// NoDestructor<T> allows declaration of a global object with a non-trivial +// constructor in static storage without needing to add a destructor. +// However, such objects still need to worry about initialization order, so +// such objects should be const initialized: +// +// // Global or namespace scope. +// ABSL_CONST_INIT absl::NoDestructor<MyRegistry> reg{"foo", "bar", 8008}; +// +// Note that if your object already has a trivial destructor, you don't need to +// use NoDestructor. +// +// Usage as Function Scope Static Variables +// +// Function static objects will be lazily initialized within static storage: +// +// // Function scope. +// const std::string& MyString() { +// static const absl::NoDestructor<std::string> x("foo"); +// return *x; +// } +// +// For function static variables, NoDestructor avoids heap allocation and can be +// inlined in static storage, resulting in exactly-once, thread-safe +// construction of an object, and very fast access thereafter (the cost is a few +// extra cycles). +// +// Using NoDestructor<T> in this manner is generally better than other patterns +// which require pointer chasing: +// +// // Prefer using absl::NoDestructor instead for the static variable. +// const std::string& MyString() { +// static const std::string* x = new std::string("foo"); +// return *x; +// } +// +template <typename T> +class NoDestructor { + public: + // Forwards arguments to the T's constructor: calls T(args...). + template <typename... Ts, + typename std::enable_if<!std::is_same<void(std::decay_t<Ts>&...), + void(NoDestructor&)>::value, + int>::type = 0> + explicit constexpr NoDestructor(Ts&&... args) + : impl_(std::forward<Ts>(args)...) {} + + // Forwards copy and move construction for T.
Enables usage like this: + // static NoDestructor<std::array<std::string, 3>> x{{{"1", "2", "3"}}}; + // static NoDestructor<std::vector<int>> x{{1, 2, 3}}; + explicit constexpr NoDestructor(const T& x) : impl_(x) {} + explicit constexpr NoDestructor(T&& x) + : impl_(std::move(x)) {} + + // No copying. + NoDestructor(const NoDestructor&) = delete; + NoDestructor& operator=(const NoDestructor&) = delete; + + // Pretend to be a smart pointer to T with deep constness. + // Never returns a null pointer. + T& operator*() { return *get(); } + T* operator->() { return get(); } + T* get() { return impl_.get(); } + const T& operator*() const { return *get(); } + const T* operator->() const { return get(); } + const T* get() const { return impl_.get(); } + + private: + class DirectImpl { + public: + template <typename... Args> + explicit constexpr DirectImpl(Args&&... args) + : value_(std::forward<Args>(args)...) {} + const T* get() const { return &value_; } + T* get() { return &value_; } + + private: + T value_; + }; + + class PlacementImpl { + public: + template <typename... Args> + explicit PlacementImpl(Args&&... args) { + new (&space_) T(std::forward<Args>(args)...); + } + const T* get() const { + return Launder(reinterpret_cast<const T*>(&space_)); + } + T* get() { return Launder(reinterpret_cast<T*>(&space_)); } + + private: + template <typename P> + static P* Launder(P* p) { +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L + return std::launder(p); +#elif ABSL_HAVE_BUILTIN(__builtin_launder) + return __builtin_launder(p); +#else + // When `std::launder` or equivalent are not available, we rely on + // undefined behavior, which works as intended on Abseil's officially + // supported platforms as of Q3 2023. +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif + return p; +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic pop +#endif +#endif + } + + alignas(T) unsigned char space_[sizeof(T)]; + }; + + // If the object is trivially destructible we use a member directly to avoid + // potential once-init runtime initialization. It somewhat defeats the + // purpose of NoDestructor in this case, but this makes the class more + // friendly to generic code. + std::conditional_t<std::is_trivially_destructible<T>::value, DirectImpl, + PlacementImpl> + impl_; +}; + +#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +// Provide 'Class Template Argument Deduction': the type of NoDestructor's T +// will be the same type as the argument passed to NoDestructor's constructor. +template <typename T> +NoDestructor(T) -> NoDestructor<T>; +#endif // ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_NO_DESTRUCTOR_H_ diff --git a/absl/base/optimization.h b/absl/base/optimization.h index ad0121a..f985995 100644 --- a/absl/base/optimization.h +++ b/absl/base/optimization.h @@ -25,6 +25,7 @@ #include <assert.h> #include "absl/base/config.h" +#include "absl/base/options.h" // ABSL_BLOCK_TAIL_CALL_OPTIMIZATION // diff --git a/absl/base/options.h b/absl/base/options.h index f308e1b..d7bf8cf 100644 --- a/absl/base/options.h +++ b/absl/base/options.h @@ -176,6 +176,32 @@ #define ABSL_OPTION_USE_STD_VARIANT 2 +// ABSL_OPTION_USE_STD_ORDERING +// +// This option controls whether absl::{partial,weak,strong}_ordering are +// implemented as aliases to the std:: ordering types, or as an independent +// implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use aliases.
This requires that all code using Abseil +// is built in C++20 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if working std:: ordering types are available. This +// option is useful when you are building your program from source. It should +// not be used otherwise -- for example, if you are distributing Abseil in a +// binary package manager -- since in mode 2, they will name different types, +// with different mangled names and binary layout, depending on the compiler +// flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// the ordering types are aliases of std:: ordering types, use the feature macro +// ABSL_USES_STD_ORDERING. + +#define ABSL_OPTION_USE_STD_ORDERING 2 // ABSL_OPTION_USE_INLINE_NAMESPACE // ABSL_OPTION_INLINE_NAMESPACE_NAME @@ -200,7 +226,7 @@ // allowed. #define ABSL_OPTION_USE_INLINE_NAMESPACE 1 -#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20230802 +#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20240116 // ABSL_OPTION_HARDENED // diff --git a/absl/base/prefetch.h b/absl/base/prefetch.h index de7a180..eb40a44 100644 --- a/absl/base/prefetch.h +++ b/absl/base/prefetch.h @@ -24,17 +24,19 @@ #ifndef ABSL_BASE_PREFETCH_H_ #define ABSL_BASE_PREFETCH_H_ +#include "absl/base/attributes.h" #include "absl/base/config.h" #if defined(ABSL_INTERNAL_HAVE_SSE) #include <xmmintrin.h> #endif -#if defined(_MSC_VER) && _MSC_VER >= 1900 && \ - (defined(_M_X64) || defined(_M_IX86)) +#if defined(_MSC_VER) #include <intrin.h> +#if defined(ABSL_INTERNAL_HAVE_SSE) #pragma intrinsic(_mm_prefetch) #endif +#endif namespace absl { ABSL_NAMESPACE_BEGIN @@ -140,21 +142,24 @@ void PrefetchToLocalCacheForWrite(const void* addr); // See __builtin_prefetch: // https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html. // -inline void PrefetchToLocalCache(const void* addr) { +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCache( + const void* addr) { __builtin_prefetch(addr, 0, 3); } -inline void PrefetchToLocalCacheNta(const void* addr) { +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheNta( + const void* addr) { __builtin_prefetch(addr, 0, 0); } -inline void PrefetchToLocalCacheForWrite(const void* addr) { +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheForWrite( + const void* addr) { // [x86] gcc/clang don't generate PREFETCHW for __builtin_prefetch(.., 1) // unless -march=broadwell or newer; this is not generally the default, so we // manually emit prefetchw. PREFETCHW is recognized as a no-op on older Intel // processors and has been present on AMD processors since the K6-2.
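The next hunk also swaps the inline-asm operand constraint from "r" to "m". Both forms side by side (the asm lines are taken from this diff; the comments are editorial):

    // Old: the compiler only materializes the pointer value in a register.
    asm("prefetchw (%0)" : : "r"(addr));
    // New: a memory operand lets the compiler fold the addressing mode into
    // the instruction and records which memory the asm touches.
    asm("prefetchw %0" : : "m"(*reinterpret_cast<const char*>(addr)));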
-#if defined(__x86_64__) - asm("prefetchw (%0)" : : "r"(addr)); +#if defined(__x86_64__) && !defined(__PRFCHW__) + asm("prefetchw %0" : : "m"(*reinterpret_cast<const char*>(addr))); #else __builtin_prefetch(addr, 1, 3); #endif @@ -164,15 +169,18 @@ inline void PrefetchToLocalCacheForWrite(const void* addr) { #define ABSL_HAVE_PREFETCH 1 -inline void PrefetchToLocalCache(const void* addr) { +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCache( + const void* addr) { _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0); } -inline void PrefetchToLocalCacheNta(const void* addr) { +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheNta( + const void* addr) { _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA); } -inline void PrefetchToLocalCacheForWrite(const void* addr) { +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheForWrite( + const void* addr) { #if defined(_MM_HINT_ET0) _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_ET0); #elif !defined(_MSC_VER) && defined(__x86_64__) @@ -180,15 +188,18 @@ inline void PrefetchToLocalCacheForWrite(const void* addr) { // up, PREFETCHW is recognized as a no-op on older Intel processors // and has been present on AMD processors since the K6-2. We have this // disabled for MSVC compilers as this miscompiles on older MSVC compilers. - asm("prefetchw (%0)" : : "r"(addr)); + asm("prefetchw %0" : : "m"(*reinterpret_cast<const char*>(addr))); #endif } #else -inline void PrefetchToLocalCache(const void* addr) {} -inline void PrefetchToLocalCacheNta(const void* addr) {} -inline void PrefetchToLocalCacheForWrite(const void* addr) {} +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCache( + const void* addr) {} +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheNta( + const void* addr) {} +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheForWrite( + const void* addr) {} #endif diff --git a/absl/base/spinlock_test_common.cc b/absl/base/spinlock_test_common.cc index 52ecf58..e904715 100644 --- a/absl/base/spinlock_test_common.cc +++ b/absl/base/spinlock_test_common.cc @@ -51,6 +51,8 @@ struct SpinLockTest { static int64_t DecodeWaitCycles(uint32_t lock_value) { return SpinLock::DecodeWaitCycles(lock_value); } + + static bool IsCooperative(const SpinLock& l) { return l.IsCooperative(); } }; namespace { @@ -266,6 +268,17 @@ TEST(SpinLockWithThreads, DoesNotDeadlock) { base_internal::NumCPUs() * 2); } +TEST(SpinLockTest, IsCooperative) { + SpinLock default_constructor; + EXPECT_TRUE(SpinLockTest::IsCooperative(default_constructor)); + + SpinLock cooperative(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); + EXPECT_TRUE(SpinLockTest::IsCooperative(cooperative)); + + SpinLock kernel_only(base_internal::SCHEDULE_KERNEL_ONLY); + EXPECT_FALSE(SpinLockTest::IsCooperative(kernel_only)); +} + } // namespace } // namespace base_internal ABSL_NAMESPACE_END diff --git a/absl/base/thread_annotations.h b/absl/base/thread_annotations.h index bc8a620..4a3f3e3 100644 --- a/absl/base/thread_annotations.h +++ b/absl/base/thread_annotations.h @@ -36,8 +36,6 @@ #include "absl/base/attributes.h" #include "absl/base/config.h" -// TODO(mbonadei): Remove after the backward compatibility period.
-#include "absl/base/internal/thread_annotations.h" // IWYU pragma: export // ABSL_GUARDED_BY() // diff --git a/absl/cleanup/BUILD.bazel b/absl/cleanup/BUILD.bazel index 2154d9f..984d571 100644 --- a/absl/cleanup/BUILD.bazel +++ b/absl/cleanup/BUILD.bazel @@ -19,7 +19,14 @@ load( "ABSL_TEST_COPTS", ) -package(default_visibility = ["//visibility:public"]) +package( + default_visibility = ["//visibility:public"], + features = [ + "header_modules", + "layering_check", + "parse_headers", + ], +) licenses(["notice"]) @@ -60,6 +67,7 @@ cc_test( ":cleanup", "//absl/base:config", "//absl/utility", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel index f22da59..0ba2fa7 100644 --- a/absl/container/BUILD.bazel +++ b/absl/container/BUILD.bazel @@ -21,7 +21,14 @@ load( "ABSL_TEST_COPTS", ) -package(default_visibility = ["//visibility:public"]) +package( + default_visibility = ["//visibility:public"], + features = [ + "header_modules", + "layering_check", + "parse_headers", + ], +) licenses(["notice"]) @@ -47,6 +54,7 @@ cc_test( "//absl/types:any", "//absl/types:optional", "//absl/utility", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -73,12 +81,13 @@ cc_test( copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ - ":counting_allocator", ":fixed_array", + ":test_allocator", "//absl/base:config", "//absl/base:exception_testing", "//absl/hash:hash_testing", "//absl/memory", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -92,6 +101,7 @@ cc_test( ":fixed_array", "//absl/base:config", "//absl/base:exception_safety_testing", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -116,6 +126,7 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":compressed_tuple", + "//absl/base:config", "//absl/base:core_headers", "//absl/memory", "//absl/meta:type_traits", @@ -139,13 +150,12 @@ cc_library( ) cc_library( - name = "counting_allocator", + name = "test_allocator", testonly = 1, - hdrs = ["internal/counting_allocator.h"], - copts = ABSL_DEFAULT_COPTS, + copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, + textual_hdrs = ["internal/test_allocator.h"], visibility = ["//visibility:private"], - deps = ["//absl/base:config"], ) cc_test( @@ -154,8 +164,8 @@ cc_test( copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ - ":counting_allocator", ":inlined_vector", + ":test_allocator", ":test_instance_tracker", "//absl/base:config", "//absl/base:core_headers", @@ -164,6 +174,7 @@ cc_test( "//absl/log:check", "//absl/memory", "//absl/strings", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -192,6 +203,7 @@ cc_test( ":inlined_vector", "//absl/base:config", "//absl/base:exception_safety_testing", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -216,6 +228,7 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":test_instance_tracker", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -256,7 +269,9 @@ cc_test( ":unordered_map_members_test", ":unordered_map_modifiers_test", "//absl/log:check", + "//absl/meta:type_traits", "//absl/types:any", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -283,15 +298,18 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["no_test_loonix"], deps = [ + ":container_memory", ":flat_hash_set", 
":hash_generator_testing", ":unordered_set_constructor_test", ":unordered_set_lookup_test", ":unordered_set_members_test", ":unordered_set_modifiers_test", + "//absl/base:config", "//absl/log:check", "//absl/memory", "//absl/strings", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -326,6 +344,7 @@ cc_test( ":unordered_map_lookup_test", ":unordered_map_members_test", ":unordered_map_modifiers_test", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -357,6 +376,7 @@ cc_test( ":unordered_set_lookup_test", ":unordered_set_members_test", ":unordered_set_modifiers_test", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -383,7 +403,10 @@ cc_test( deps = [ ":container_memory", ":test_instance_tracker", + "//absl/base:no_destructor", + "//absl/meta:type_traits", "//absl/strings", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -417,6 +440,7 @@ cc_test( "//absl/strings", "//absl/strings:cord", "//absl/strings:cord_test_helpers", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -430,6 +454,7 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_policy_testing", + "//absl/base:no_destructor", "//absl/memory", "//absl/meta:type_traits", "//absl/strings", @@ -455,6 +480,7 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_policy_testing", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -477,6 +503,7 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_policy_traits", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -497,6 +524,8 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":common_policy_traits", + "//absl/base:config", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -560,6 +589,7 @@ cc_test( "//absl/synchronization", "//absl/synchronization:thread_pool", "//absl/time", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -580,6 +610,8 @@ cc_test( deps = [ ":hash_policy_traits", ":node_slot_policy", + "//absl/base:config", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -592,6 +624,8 @@ cc_library( deps = [ ":container_memory", ":raw_hash_set", + "//absl/base:config", + "//absl/base:core_headers", "//absl/base:throw_delegate", ], ) @@ -651,13 +685,19 @@ cc_test( ":hash_function_defaults", ":hash_policy_testing", ":hashtable_debug", + ":hashtablez_sampler", ":raw_hash_set", + ":test_allocator", "//absl/base", "//absl/base:config", "//absl/base:core_headers", "//absl/base:prefetch", + "//absl/hash", "//absl/log", + "//absl/memory", + "//absl/meta:type_traits", "//absl/strings", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -694,10 +734,12 @@ cc_binary( ":hash_function_defaults", ":hashtable_debug", ":raw_hash_set", + "//absl/base:no_destructor", "//absl/random", "//absl/random:distributions", "//absl/strings", "//absl/strings:str_format", + "//absl/types:optional", ], ) @@ -710,7 +752,8 @@ cc_test( deps = [ ":raw_hash_set", ":tracked", - "//absl/base:core_headers", + "//absl/base:config", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -723,6 +766,7 @@ cc_library( deps = [ "//absl/base:config", "//absl/base:core_headers", + "//absl/debugging:demangle_internal", "//absl/meta:type_traits", "//absl/strings", "//absl/types:span", @@ -741,9 +785,10 @@ cc_test( 
deps = [ ":layout", "//absl/base:config", - "//absl/base:core_headers", "//absl/log:check", "//absl/types:span", + "//absl/utility", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -889,6 +934,7 @@ cc_test( ":unordered_set_lookup_test", ":unordered_set_members_test", ":unordered_set_modifiers_test", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -904,6 +950,7 @@ cc_test( ":unordered_map_lookup_test", ":unordered_map_members_test", ":unordered_map_modifiers_test", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -920,6 +967,7 @@ cc_test( ":flat_hash_set", ":node_hash_map", ":node_hash_set", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -989,7 +1037,7 @@ cc_test( deps = [ ":btree", ":btree_test_common", - ":counting_allocator", + ":test_allocator", ":test_instance_tracker", "//absl/algorithm:container", "//absl/base:core_headers", @@ -1001,6 +1049,7 @@ cc_test( "//absl/strings", "//absl/types:compare", "//absl/types:optional", + "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ], ) @@ -1031,5 +1080,6 @@ cc_binary( "//absl/strings:str_format", "//absl/time", "@com_github_google_benchmark//:benchmark_main", + "@com_google_googletest//:gtest", ], ) diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt index 39d95e0..128cc0e 100644 --- a/absl/container/CMakeLists.txt +++ b/absl/container/CMakeLists.txt @@ -77,13 +77,13 @@ absl_cc_test( absl::btree_test_common absl::compare absl::core_headers - absl::counting_allocator absl::flags absl::hash_testing absl::optional absl::random_random absl::raw_logging_internal absl::strings + absl::test_allocator absl::test_instance_tracker GTest::gmock_main ) @@ -145,11 +145,11 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::fixed_array - absl::counting_allocator absl::config absl::exception_testing absl::hash_testing absl::memory + absl::test_allocator GTest::gmock_main ) @@ -177,6 +177,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::compressed_tuple + absl::config absl::core_headers absl::memory absl::span @@ -204,13 +205,14 @@ absl_cc_library( # Internal-only target, do not depend on directly. 
absl_cc_library( NAME - counting_allocator + test_allocator HDRS - "internal/counting_allocator.h" + "internal/test_allocator.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config + GTest::gmock ) absl_cc_test( @@ -224,12 +226,12 @@ absl::check absl::config absl::core_headers - absl::counting_allocator absl::exception_testing absl::hash_testing absl::inlined_vector absl::memory absl::strings + absl::test_allocator absl::test_instance_tracker GTest::gmock_main ) @@ -304,6 +306,7 @@ absl::check absl::flat_hash_map absl::hash_generator_testing + absl::type_traits absl::unordered_map_constructor_test absl::unordered_map_lookup_test absl::unordered_map_members_test @@ -338,6 +341,8 @@ "-DUNORDERED_SET_CXX17" DEPS absl::check + absl::config + absl::container_memory absl::flat_hash_set absl::hash_generator_testing absl::memory @@ -445,8 +450,10 @@ ${ABSL_TEST_COPTS} DEPS absl::container_memory + absl::no_destructor absl::strings absl::test_instance_tracker + absl::type_traits GTest::gmock_main ) @@ -497,6 +504,7 @@ absl::hash_policy_testing absl::memory absl::meta + absl::no_destructor absl::strings TESTONLY ) @@ -575,6 +583,7 @@ ${ABSL_TEST_COPTS} DEPS absl::common_policy_traits + absl::config GTest::gmock_main ) @@ -658,6 +667,7 @@ COPTS ${ABSL_TEST_COPTS} DEPS + absl::config absl::hash_policy_traits absl::node_slot_policy GTest::gmock_main @@ -672,7 +682,9 @@ COPTS ${ABSL_DEFAULT_COPTS} DEPS + absl::config absl::container_memory + absl::core_headers absl::raw_hash_set absl::throw_delegate PUBLIC @@ -736,13 +748,18 @@ absl::core_headers absl::flat_hash_map absl::flat_hash_set + absl::hash absl::hash_function_defaults absl::hash_policy_testing absl::hashtable_debug + absl::hashtablez_sampler absl::log + absl::memory absl::prefetch absl::raw_hash_set absl::strings + absl::test_allocator + absl::type_traits GTest::gmock_main ) @@ -754,9 +771,9 @@ COPTS ${ABSL_TEST_COPTS} DEPS + absl::config absl::raw_hash_set absl::tracked - absl::core_headers GTest::gmock_main ) @@ -771,6 +788,7 @@ DEPS absl::config absl::core_headers + absl::debugging_internal absl::meta absl::strings absl::span @@ -789,8 +807,8 @@ absl::layout absl::check absl::config - absl::core_headers absl::span + absl::utility GTest::gmock_main ) diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h index cd3ee2b..0f62f0b 100644 --- a/absl/container/btree_map.h +++ b/absl/container/btree_map.h @@ -53,6 +53,7 @@ #ifndef ABSL_CONTAINER_BTREE_MAP_H_ #define ABSL_CONTAINER_BTREE_MAP_H_ +#include "absl/base/attributes.h" #include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/btree_container.h" // IWYU pragma: export @@ -864,7 +865,8 @@ struct map_params : common_params template <typename V> - static auto key(const V &value) -> decltype(value.first) { + static auto key(const V &value ABSL_ATTRIBUTE_LIFETIME_BOUND) + -> decltype((value.first)) { return value.first; } static const Key &key(const slot_type *s) { return slot_policy::key(s); } diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h index 8f4d993..acd013b 100644 --- a/absl/container/flat_hash_map.h +++ b/absl/container/flat_hash_map.h @@ -64,7 +64,7 @@ struct FlatHashMapPolicy; // `insert()`, provided that the map is provided a compatible heterogeneous // hashing function and equality operator.
// * Invalidates any references and pointers to elements within the table after -// `rehash()`. +// `rehash()` and when the table is moved. // * Contains a `capacity()` member function indicating the number of element // slots (open, deleted, and empty) within the hash map. // * Returns `void` from the `erase(iterator)` overload. @@ -579,9 +579,9 @@ struct FlatHashMapPolicy { } template <class Allocator> - static void transfer(Allocator* alloc, slot_type* new_slot, + static auto transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) { - slot_policy::transfer(alloc, new_slot, old_slot); + return slot_policy::transfer(alloc, new_slot, old_slot); } template <class F, class... Args> diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h index c789c7e..a94a82a 100644 --- a/absl/container/flat_hash_set.h +++ b/absl/container/flat_hash_set.h @@ -60,7 +60,7 @@ struct FlatHashSetPolicy; // that the set is provided a compatible heterogeneous hashing function and // equality operator. // * Invalidates any references and pointers to elements within the table after -// `rehash()`. +// `rehash()` and when the table is moved. // * Contains a `capacity()` member function indicating the number of element // slots (open, deleted, and empty) within the hash set. // * Returns `void` from the `erase(iterator)` overload. diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h index 569faa0..91df57a 100644 --- a/absl/container/internal/btree.h +++ b/absl/container/internal/btree.h @@ -79,6 +79,7 @@ namespace container_internal { #ifdef ABSL_BTREE_ENABLE_GENERATIONS #error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set #elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \ defined(ABSL_HAVE_MEMORY_SANITIZER) // When compiled in sanitizer mode, we add generation integers to the nodes and // iterators. When iterators are used, we validate that the container has not @@ -572,13 +573,6 @@ class btree_node { btree_node(btree_node const &) = delete; btree_node &operator=(btree_node const &) = delete; - // Public for EmptyNodeType. - constexpr static size_type Alignment() { - static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(), - "Alignment of all nodes must be equal."); - return InternalLayout().Alignment(); - } - protected: btree_node() = default; @@ -653,6 +647,12 @@ class btree_node { return InternalLayout().AllocSize(); } + constexpr static size_type Alignment() { + static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(), + "Alignment of all nodes must be equal."); + return InternalLayout().Alignment(); + } + // N is the index of the type in the Layout definition. // ElementType is the Nth type in the Layout definition. template <size_type N> @@ -1122,8 +1122,11 @@ class btree_iterator : private btree_iterator_generation_info { using const_reference = typename params_type::const_reference; using slot_type = typename params_type::slot_type; - using iterator = - btree_iterator<normal_node, normal_reference, normal_pointer>; + // In sets, all iterators are const. + using iterator = absl::conditional_t< + is_map_container<params_type>::value, + btree_iterator<normal_node, normal_reference, normal_pointer>, + btree_iterator<const_node, const_reference, const_pointer>>; using const_iterator = btree_iterator<const_node, const_reference, const_pointer>; @@ -1318,7 +1321,7 @@ class btree { // We use a static empty node for the root/leftmost/rightmost of empty btrees // in order to avoid branching in begin()/end().
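The hunk below replaces the MSVC-special-cased empty node with a single constant-initialized sentinel whose parent pointer refers to itself. The pattern in isolation, as a simplified sketch (Node and Sentinel are stand-ins, not the real btree types):

    struct Node { Node* parent; };
    struct Sentinel : Node {
      constexpr Sentinel() : Node{this} {}
    };

    Node* EmptyNode() {
      // Constant-initialized: no thread-safe-static guard, no allocation.
      static constexpr Sentinel sentinel;
      // const_cast is safe here only because the sentinel is never mutated.
      return const_cast<Sentinel*>(&sentinel);
    }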
- struct alignas(node_type::Alignment()) EmptyNodeType : node_type { + struct EmptyNodeType : node_type { using field_type = typename node_type::field_type; node_type *parent; #ifdef ABSL_BTREE_ENABLE_GENERATIONS @@ -1331,25 +1334,12 @@ class btree { // as a leaf node). max_count() is never called when the tree is empty. field_type max_count = node_type::kInternalNodeMaxCount + 1; -#ifdef _MSC_VER - // MSVC has constexpr code generations bugs here. - EmptyNodeType() : parent(this) {} -#else - explicit constexpr EmptyNodeType(node_type *p) : parent(p) {} -#endif + constexpr EmptyNodeType() : parent(this) {} }; static node_type *EmptyNode() { -#ifdef _MSC_VER - static EmptyNodeType *empty_node = new EmptyNodeType; - // This assert fails on some other construction methods. - assert(empty_node->parent == empty_node); - return empty_node; -#else - static constexpr EmptyNodeType empty_node( - const_cast(&empty_node)); + alignas(node_type::Alignment()) static constexpr EmptyNodeType empty_node; return const_cast(&empty_node); -#endif } enum : uint32_t { @@ -2420,7 +2410,7 @@ auto btree

::operator=(btree &&other) noexcept -> btree & { using std::swap; if (absl::allocator_traits< - allocator_type>::propagate_on_container_copy_assignment::value) { + allocator_type>::propagate_on_container_move_assignment::value) { swap(root_, other.root_); // Note: `rightmost_` also contains the allocator and the key comparator. swap(rightmost_, other.rightmost_); @@ -2534,6 +2524,10 @@ auto btree

::rebalance_after_delete(iterator iter) -> iterator { return res; } +// Note: we tried implementing this more efficiently by erasing all of the +// elements in [begin, end) at once and then doing rebalancing once at the end +// (rather than interleaving deletion and rebalancing), but that adds a lot of +// complexity, which seems to outweigh the performance win. template auto btree

::erase_range(iterator begin, iterator end) -> std::pair { @@ -2863,7 +2857,8 @@ inline auto btree

::internal_emplace(iterator iter, Args &&...args) } } (void)replaced_node; -#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_HWADDRESS_SANITIZER) if (!replaced_node) { assert(iter.node_->is_leaf()); if (iter.node_->is_root()) { diff --git a/absl/container/internal/common_policy_traits.h b/absl/container/internal/common_policy_traits.h index 3558a54..57eac67 100644 --- a/absl/container/internal/common_policy_traits.h +++ b/absl/container/internal/common_policy_traits.h @@ -93,11 +93,13 @@ struct common_policy_traits { struct Rank0 : Rank1 {}; // Use auto -> decltype as an enabler. + // P::transfer returns std::true_type if transfer uses memcpy (e.g. in + // node_slot_policy). template static auto transfer_impl(Alloc* alloc, slot_type* new_slot, slot_type* old_slot, Rank0) - -> decltype((void)P::transfer(alloc, new_slot, old_slot)) { - P::transfer(alloc, new_slot, old_slot); + -> decltype(P::transfer(alloc, new_slot, old_slot)) { + return P::transfer(alloc, new_slot, old_slot); } #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 // This overload returns true_type for the trait below. diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h index f59ca4e..3262d4e 100644 --- a/absl/container/internal/container_memory.h +++ b/absl/container/internal/container_memory.h @@ -122,10 +122,10 @@ auto TupleRefImpl(T&& t, absl::index_sequence) // Returns a tuple of references to the elements of the input tuple. T must be a // tuple. template -auto TupleRef(T&& t) -> decltype( - TupleRefImpl(std::forward(t), - absl::make_index_sequence< - std::tuple_size::type>::value>())) { +auto TupleRef(T&& t) -> decltype(TupleRefImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>())) { return TupleRefImpl( std::forward(t), absl::make_index_sequence< @@ -156,8 +156,8 @@ void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) { // Constructs T using the args specified in the tuple and calls F with the // constructed value. template -decltype(std::declval()(std::declval())) WithConstructed( - Tuple&& t, F&& f) { +decltype(std::declval()(std::declval())) WithConstructed(Tuple&& t, + F&& f) { return memory_internal::WithConstructedImpl( std::forward(t), absl::make_index_sequence< @@ -423,16 +423,19 @@ struct map_slot_policy { } template - static void transfer(Allocator* alloc, slot_type* new_slot, + static auto transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) { + auto is_relocatable = + typename absl::is_trivially_relocatable::type(); + emplace(new_slot); #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 - if (absl::is_trivially_relocatable()) { + if (is_relocatable) { // TODO(b/247130232,b/251814870): remove casts after fixing warnings. std::memcpy(static_cast(std::launder(&new_slot->value)), static_cast(&old_slot->value), sizeof(value_type)); - return; + return is_relocatable; } #endif @@ -444,6 +447,7 @@ struct map_slot_policy { std::move(old_slot->value)); } destroy(alloc, old_slot); + return is_relocatable; } }; diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h deleted file mode 100644 index 66068a5..0000000 --- a/absl/container/internal/counting_allocator.h +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ -#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ - -#include -#include - -#include "absl/base/config.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -// This is a stateful allocator, but the state lives outside of the -// allocator (in whatever test is using the allocator). This is odd -// but helps in tests where the allocator is propagated into nested -// containers - that chain of allocators uses the same state and is -// thus easier to query for aggregate allocation information. -template -class CountingAllocator { - public: - using Allocator = std::allocator; - using AllocatorTraits = std::allocator_traits; - using value_type = typename AllocatorTraits::value_type; - using pointer = typename AllocatorTraits::pointer; - using const_pointer = typename AllocatorTraits::const_pointer; - using size_type = typename AllocatorTraits::size_type; - using difference_type = typename AllocatorTraits::difference_type; - - CountingAllocator() = default; - explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {} - CountingAllocator(int64_t* bytes_used, int64_t* instance_count) - : bytes_used_(bytes_used), instance_count_(instance_count) {} - - template - CountingAllocator(const CountingAllocator& x) - : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {} - - pointer allocate( - size_type n, - typename AllocatorTraits::const_void_pointer hint = nullptr) { - Allocator allocator; - pointer ptr = AllocatorTraits::allocate(allocator, n, hint); - if (bytes_used_ != nullptr) { - *bytes_used_ += n * sizeof(T); - } - return ptr; - } - - void deallocate(pointer p, size_type n) { - Allocator allocator; - AllocatorTraits::deallocate(allocator, p, n); - if (bytes_used_ != nullptr) { - *bytes_used_ -= n * sizeof(T); - } - } - - template - void construct(U* p, Args&&... args) { - Allocator allocator; - AllocatorTraits::construct(allocator, p, std::forward(args)...); - if (instance_count_ != nullptr) { - *instance_count_ += 1; - } - } - - template - void destroy(U* p) { - Allocator allocator; - // Ignore GCC warning bug. 
-#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wuse-after-free" -#endif - AllocatorTraits::destroy(allocator, p); -#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) -#pragma GCC diagnostic pop -#endif - if (instance_count_ != nullptr) { - *instance_count_ -= 1; - } - } - - template - class rebind { - public: - using other = CountingAllocator; - }; - - friend bool operator==(const CountingAllocator& a, - const CountingAllocator& b) { - return a.bytes_used_ == b.bytes_used_ && - a.instance_count_ == b.instance_count_; - } - - friend bool operator!=(const CountingAllocator& a, - const CountingAllocator& b) { - return !(a == b); - } - - int64_t* bytes_used_ = nullptr; - int64_t* instance_count_ = nullptr; -}; - -} // namespace container_internal -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc index 59cc5aa..e89dfdb 100644 --- a/absl/container/internal/hash_generator_testing.cc +++ b/absl/container/internal/hash_generator_testing.cc @@ -16,6 +16,8 @@ #include +#include "absl/base/no_destructor.h" + namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { @@ -41,11 +43,11 @@ class RandomDeviceSeedSeq { } // namespace std::mt19937_64* GetSharedRng() { - static auto* rng = [] { + static absl::NoDestructor rng([] { RandomDeviceSeedSeq seed_seq; - return new std::mt19937_64(seed_seq); - }(); - return rng; + return std::mt19937_64(seed_seq); + }()); + return rng.get(); } std::string Generator::operator()() const { @@ -59,7 +61,7 @@ std::string Generator::operator()() const { } absl::string_view Generator::operator()() const { - static auto* arena = new std::deque(); + static absl::NoDestructor> arena; // NOLINTNEXTLINE(runtime/int) std::uniform_int_distribution chars(0x20, 0x7E); arena->emplace_back(); diff --git a/absl/container/internal/hashtable_debug.h b/absl/container/internal/hashtable_debug.h index 19d5212..c79c1a9 100644 --- a/absl/container/internal/hashtable_debug.h +++ b/absl/container/internal/hashtable_debug.h @@ -95,14 +95,6 @@ size_t AllocatedByteSize(const C& c) { HashtableDebugAccess::AllocatedByteSize(c); } -// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C` -// and `c.size()` is equal to `num_elements`. 
-template -size_t LowerBoundAllocatedByteSize(size_t num_elements) { - return absl::container_internal::hashtable_debug_internal:: - HashtableDebugAccess::LowerBoundAllocatedByteSize(num_elements); -} - } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h index d8fd8f3..e41ee2d 100644 --- a/absl/container/internal/hashtablez_sampler.h +++ b/absl/container/internal/hashtablez_sampler.h @@ -137,18 +137,7 @@ class HashtablezInfoHandle { UnsampleSlow(info_); } - HashtablezInfoHandle(const HashtablezInfoHandle&) = delete; - HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete; - - HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept - : info_(absl::exchange(o.info_, nullptr)) {} - HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept { - if (ABSL_PREDICT_FALSE(info_ != nullptr)) { - UnsampleSlow(info_); - } - info_ = absl::exchange(o.info_, nullptr); - return *this; - } + inline bool IsSampled() const { return ABSL_PREDICT_FALSE(info_ != nullptr); } inline void RecordStorageChanged(size_t size, size_t capacity) { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; @@ -198,6 +187,7 @@ class HashtablezInfoHandle { explicit HashtablezInfoHandle(std::nullptr_t) {} inline void Unregister() {} + inline bool IsSampled() const { return false; } inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {} inline void RecordRehash(size_t /*total_probe_length*/) {} inline void RecordReservation(size_t /*target_capacity*/) {} diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h index b2a602d..0eb9c34 100644 --- a/absl/container/internal/inlined_vector.h +++ b/absl/container/internal/inlined_vector.h @@ -26,6 +26,7 @@ #include #include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/macros.h" #include "absl/container/internal/compressed_tuple.h" #include "absl/memory/memory.h" @@ -384,7 +385,17 @@ class Storage { bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; } - Pointer GetAllocatedData() { return data_.allocated.allocated_data; } + Pointer GetAllocatedData() { + // GCC 12 has a false-positive -Wmaybe-uninitialized warning here. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return data_.allocated.allocated_data; +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic pop +#endif + } ConstPointer GetAllocatedData() const { return data_.allocated.allocated_data; diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h index a59a243..a4ba610 100644 --- a/absl/container/internal/layout.h +++ b/absl/container/internal/layout.h @@ -55,7 +55,7 @@ // `Partial()` comes in handy when the array sizes are embedded into the // allocation. // -// // size_t[1] containing N, size_t[1] containing M, double[N], int[M]. +// // size_t[0] containing N, size_t[1] containing M, double[N], int[M]. 
// using L = Layout<size_t, size_t, double, int>; // // unsigned char* Allocate(size_t n, size_t m) { @@ -172,6 +172,7 @@ #include #include "absl/base/config.h" +#include "absl/debugging/internal/demangle.h" #include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" @@ -181,14 +182,6 @@ #include #endif -#if defined(__GXX_RTTI) -#define ABSL_INTERNAL_HAS_CXA_DEMANGLE -#endif - -#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE -#include -#endif - namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { @@ -294,19 +287,11 @@ constexpr size_t Max(size_t a, size_t b, Ts... rest) { template std::string TypeName() { std::string out; - int status = 0; - char* demangled = nullptr; -#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE - demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status); -#endif - if (status == 0 && demangled != nullptr) { // Demangling succeeded. - absl::StrAppend(&out, "<", demangled, ">"); - free(demangled); - } else { -#if defined(__GXX_RTTI) || defined(_CPPRTTI) - absl::StrAppend(&out, "<", typeid(T).name(), ">"); +#if ABSL_INTERNAL_HAS_RTTI + absl::StrAppend(&out, "<", + absl::debugging_internal::DemangleString(typeid(T).name()), + ">"); #endif - } return out; } diff --git a/absl/container/internal/node_slot_policy.h b/absl/container/internal/node_slot_policy.h index baba574..3f1874d 100644 --- a/absl/container/internal/node_slot_policy.h +++ b/absl/container/internal/node_slot_policy.h @@ -62,9 +62,12 @@ struct node_slot_policy { Policy::delete_element(alloc, *slot); } + // Returns true_type to indicate that transfer can use memcpy. template - static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) { + static std::true_type transfer(Alloc*, slot_type* new_slot, + slot_type* old_slot) { *new_slot = *old_slot; + return {}; } static size_t space_used(const slot_type* slot) { diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h index 2d5a871..97182bc 100644 --- a/absl/container/internal/raw_hash_map.h +++ b/absl/container/internal/raw_hash_map.h @@ -19,6 +19,8 @@ #include #include +#include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/internal/throw_delegate.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export @@ -175,13 +177,20 @@ class raw_hash_map : public raw_hash_set { template MappedReference

operator[](key_arg&& key) ABSL_ATTRIBUTE_LIFETIME_BOUND { - return Policy::value(&*try_emplace(std::forward(key)).first); + // It is safe to use unchecked_deref here because try_emplace + // will always return an iterator pointing to a valid item in the table, + // since it inserts if nothing is found for the given key. + return Policy::value( + &this->unchecked_deref(try_emplace(std::forward(key)).first)); } template MappedReference

operator[](const key_arg& key) ABSL_ATTRIBUTE_LIFETIME_BOUND { - return Policy::value(&*try_emplace(key).first); + // It is safe to use unchecked_deref here because try_emplace + // will always return an iterator pointing to a valid item in the table, + // since it inserts if nothing is found for the given key. + return Policy::value(&this->unchecked_deref(try_emplace(key).first)); } private: diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc index 2ff95b6..9f8ea51 100644 --- a/absl/container/internal/raw_hash_set.cc +++ b/absl/container/internal/raw_hash_set.cc @@ -17,11 +17,13 @@ #include #include #include +#include #include #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/dynamic_annotations.h" +#include "absl/container/internal/container_memory.h" #include "absl/hash/hash.h" namespace absl { @@ -67,6 +69,16 @@ inline size_t RandomSeed() { return value ^ static_cast(reinterpret_cast(&counter)); } +bool ShouldRehashForBugDetection(const ctrl_t* ctrl, size_t capacity) { + // Note: we can't use the abseil-random library because abseil-random + // depends on swisstable. We want to return true with probability + // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this, + // we probe based on a random hash and see if the offset is less than + // RehashProbabilityConstant(). + return probe(ctrl, capacity, absl::HashOf(RandomSeed())).offset() < + RehashProbabilityConstant(); +} + } // namespace GenerationType* EmptyGeneration() { @@ -84,13 +96,12 @@ bool CommonFieldsGenerationInfoEnabled:: size_t capacity) const { if (reserved_growth_ == kReservedGrowthJustRanOut) return true; if (reserved_growth_ > 0) return false; - // Note: we can't use the abseil-random library because abseil-random - // depends on swisstable. We want to return true with probability - // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this, - // we probe based on a random hash and see if the offset is less than - // RehashProbabilityConstant(). - return probe(ctrl, capacity, absl::HashOf(RandomSeed())).offset() < - RehashProbabilityConstant(); + return ShouldRehashForBugDetection(ctrl, capacity); +} + +bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move( + const ctrl_t* ctrl, size_t capacity) const { + return ShouldRehashForBugDetection(ctrl, capacity); } bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) { @@ -117,14 +128,6 @@ FindInfo find_first_non_full_outofline(const CommonFields& common, return find_first_non_full(common, hash); } -// Returns the address of the ith slot in slots where each slot occupies -// slot_size. -static inline void* SlotAddress(void* slot_array, size_t slot, - size_t slot_size) { - return reinterpret_cast(reinterpret_cast(slot_array) + - (slot * slot_size)); -} - // Returns the address of the slot just after slot assuming each slot has the // specified size. 
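An aside on the ShouldRehashForBugDetection helper factored out above: it wants to return true with probability min(1, RehashProbabilityConstant() / capacity()) without pulling in abseil-random, so it probes from a random hash and checks whether the first probe offset lands below the constant. A toy model of that argument -- the constant, the capacity, and the "first probe offset is uniform" simplification below are stand-ins, not the real implementation:

#include <cstddef>
#include <cstdio>
#include <random>

int main() {
  constexpr std::size_t kK = 16;           // stand-in for RehashProbabilityConstant()
  constexpr std::size_t kCapacity = 1023;  // capacities are always 2^m - 1

  std::mt19937_64 rng(1234);
  std::size_t hits = 0;
  const std::size_t kTrials = 1000000;
  for (std::size_t i = 0; i < kTrials; ++i) {
    // The first probe position of a uniformly random hash is uniform over
    // [0, capacity]; masking with 2^m - 1 models that.
    const std::size_t offset = rng() & kCapacity;
    if (offset < kK) ++hits;  // the "should rehash" decision
  }
  std::printf("empirical %.5f vs expected %.5f\n",
              static_cast<double>(hits) / kTrials,
              static_cast<double>(kK) / (kCapacity + 1));
}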
static inline void* NextSlot(void* slot, size_t slot_size) { @@ -218,26 +221,35 @@ void DropDeletesWithoutResize(CommonFields& common, common.infoz().RecordRehash(total_probe_length); } -void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) { - assert(IsFull(*it) && "erasing a dangling iterator"); - c.set_size(c.size() - 1); - const auto index = static_cast(it - c.control()); +static bool WasNeverFull(CommonFields& c, size_t index) { + if (is_single_group(c.capacity())) { + return true; + } const size_t index_before = (index - Group::kWidth) & c.capacity(); - const auto empty_after = Group(it).MaskEmpty(); + const auto empty_after = Group(c.control() + index).MaskEmpty(); const auto empty_before = Group(c.control() + index_before).MaskEmpty(); // We count how many consecutive non empties we have to the right and to the // left of `it`. If the sum is >= kWidth then there is at least one probe // window that might have seen a full group. - bool was_never_full = empty_before && empty_after && - static_cast(empty_after.TrailingZeros()) + - empty_before.LeadingZeros() < - Group::kWidth; - - SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted, - slot_size); - c.set_growth_left(c.growth_left() + (was_never_full ? 1 : 0)); + return empty_before && empty_after && + static_cast(empty_after.TrailingZeros()) + + empty_before.LeadingZeros() < + Group::kWidth; +} + +void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) { + assert(IsFull(c.control()[index]) && "erasing a dangling iterator"); + c.decrement_size(); c.infoz().RecordErase(); + + if (WasNeverFull(c, index)) { + SetCtrl(c, index, ctrl_t::kEmpty, slot_size); + c.set_growth_left(c.growth_left() + 1); + return; + } + + SetCtrl(c, index, ctrl_t::kDeleted, slot_size); } void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, @@ -245,19 +257,124 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, c.set_size(0); if (reuse) { ResetCtrl(c, policy.slot_size); + ResetGrowthLeft(c); c.infoz().RecordStorageChanged(0, c.capacity()); } else { + // We need to record infoz before calling dealloc, which will unregister + // infoz. + c.infoz().RecordClearedReservation(); + c.infoz().RecordStorageChanged(0, 0); (*policy.dealloc)(c, policy); c.set_control(EmptyGroup()); c.set_generation_ptr(EmptyGeneration()); c.set_slots(nullptr); c.set_capacity(0); - c.infoz().RecordClearedReservation(); - assert(c.size() == 0); - c.infoz().RecordStorageChanged(0, 0); } } +void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes( + ctrl_t* new_ctrl, size_t new_capacity) const { + assert(is_single_group(new_capacity)); + constexpr size_t kHalfWidth = Group::kWidth / 2; + assert(old_capacity_ < kHalfWidth); + + const size_t half_old_capacity = old_capacity_ / 2; + + // NOTE: operations are done with compile time known size = kHalfWidth. + // Compiler optimizes that into single ASM operation. + + // Copy second half of bytes to the beginning. + // We potentially copy more bytes in order to have compile time known size. + // Mirrored bytes from the old_ctrl_ will also be copied. + // In case of old_capacity_ == 3, we will copy 1st element twice. + // Examples: + // old_ctrl = 0S0EEEEEEE... + // new_ctrl = S0EEEEEEEE... + // + // old_ctrl = 01S01EEEEE... + // new_ctrl = 1S01EEEEEE... + // + // old_ctrl = 0123456S0123456EE... + // new_ctrl = 456S0123?????????... + std::memcpy(new_ctrl, old_ctrl_ + half_old_capacity + 1, kHalfWidth); + // Clean up copied kSentinel from old_ctrl. 
+ new_ctrl[half_old_capacity] = ctrl_t::kEmpty; + + // Clean up damaged or uninitialized bytes. + + // Clean bytes after the intended size of the copy. + // Example: + // new_ctrl = 1E01EEEEEEE???? + // *new_ctrl= 1E0EEEEEEEE???? + // position / + std::memset(new_ctrl + old_capacity_ + 1, static_cast(ctrl_t::kEmpty), + kHalfWidth); + // Clean non-mirrored bytes that are not initialized. + // For small old_capacity that may be inside of mirrored bytes zone. + // Examples: + // new_ctrl = 1E0EEEEEEEE??????????.... + // *new_ctrl= 1E0EEEEEEEEEEEEE?????.... + // position / + // + // new_ctrl = 456E0123???????????... + // *new_ctrl= 456E0123EEEEEEEE???... + // position / + std::memset(new_ctrl + kHalfWidth, static_cast(ctrl_t::kEmpty), + kHalfWidth); + // Clean last mirrored bytes that are not initialized + // and will not be overwritten by mirroring. + // Examples: + // new_ctrl = 1E0EEEEEEEEEEEEE???????? + // *new_ctrl= 1E0EEEEEEEEEEEEEEEEEEEEE + // position S / + // + // new_ctrl = 456E0123EEEEEEEE??????????????? + // *new_ctrl= 456E0123EEEEEEEE???????EEEEEEEE + // position S / + std::memset(new_ctrl + new_capacity + kHalfWidth, + static_cast(ctrl_t::kEmpty), kHalfWidth); + + // Create mirrored bytes. old_capacity_ < kHalfWidth + // Example: + // new_ctrl = 456E0123EEEEEEEE???????EEEEEEEE + // *new_ctrl= 456E0123EEEEEEEE456E0123EEEEEEE + // position S/ + ctrl_t g[kHalfWidth]; + std::memcpy(g, new_ctrl, kHalfWidth); + std::memcpy(new_ctrl + new_capacity + 1, g, kHalfWidth); + + // Finally set sentinel to its place. + new_ctrl[new_capacity] = ctrl_t::kSentinel; +} + +void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots( + void* old_slots, void* new_slots, size_t slot_size) const { + assert(old_capacity_ > 0); + const size_t half_old_capacity = old_capacity_ / 2; + + SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_); + std::memcpy(new_slots, + SlotAddress(old_slots, half_old_capacity + 1, slot_size), + slot_size * half_old_capacity); + std::memcpy(SlotAddress(new_slots, half_old_capacity + 1, slot_size), + old_slots, slot_size * (half_old_capacity + 1)); +} + +void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable( + CommonFields& c, void* old_slots, size_t slot_size) { + assert(old_capacity_ < Group::kWidth / 2); + assert(is_single_group(c.capacity())); + assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity())); + + GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity()); + GrowIntoSingleGroupShuffleTransferableSlots(old_slots, c.slot_array(), + slot_size); + + // We poison since GrowIntoSingleGroupShuffleTransferableSlots + // may leave empty slots unpoisoned. + PoisonSingleGroupEmptySlots(c, slot_size); +} + } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h index 5f89d8e..3518bc3 100644 --- a/absl/container/internal/raw_hash_set.h +++ b/absl/container/internal/raw_hash_set.h @@ -62,6 +62,9 @@ // pseudo-struct: // // struct BackingArray { +// // Sampling handler. This field isn't present when the sampling is +// // disabled or this allocation hasn't been selected for sampling. +// HashtablezInfoHandle infoz_; // // The number of elements we can insert before growing the capacity. // size_t growth_left; // // Control bytes for the "real" slots. 
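The BackingArray comment above, together with the offset helpers this diff extends later in the file (ControlOffset, GenerationOffset, SlotOffset, AllocSize), pins down where each region of the allocation lives. A self-contained model of that arithmetic under assumed sizes -- the group width, infoz-handle size, and generation size below are stand-ins, not the values on every build:

#include <cstddef>
#include <cstdio>

constexpr std::size_t kGroupWidth = 16;      // stand-in for Group::kWidth
constexpr std::size_t kInfozBytes = 8;       // stand-in for sizeof(HashtablezInfoHandle)
constexpr std::size_t kGenerationBytes = 0;  // nonzero only in generation builds

// infoz (only when sampled) and growth_left precede the control bytes.
constexpr std::size_t ControlOffset(bool has_infoz) {
  return (has_infoz ? kInfozBytes : 0) + sizeof(std::size_t);
}

// capacity real control bytes, 1 sentinel, then kGroupWidth - 1 cloned bytes.
constexpr std::size_t GenerationOffset(std::size_t capacity, bool has_infoz) {
  return ControlOffset(has_infoz) + capacity + 1 + (kGroupWidth - 1);
}

// Slots start at the next slot_align boundary; for power-of-two alignments
// the real code's mask (~slot_align + 1) equals the ~(slot_align - 1) used here.
constexpr std::size_t SlotOffset(std::size_t capacity, std::size_t slot_align,
                                 bool has_infoz) {
  return (GenerationOffset(capacity, has_infoz) + kGenerationBytes +
          slot_align - 1) &
         ~(slot_align - 1);
}

constexpr std::size_t AllocSize(std::size_t capacity, std::size_t slot_size,
                                std::size_t slot_align, bool has_infoz) {
  return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size;
}

int main() {
  // A capacity-15 table of 8-byte slots, unsampled vs. sampled:
  std::printf("%zu vs %zu bytes\n", AllocSize(15, 8, 8, false),
              AllocSize(15, 8, 8, true));
}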
@@ -175,25 +178,29 @@ #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ #include +#include #include #include #include #include +#include #include #include #include -#include #include #include #include +#include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/endian.h" #include "absl/base/internal/raw_logging.h" +#include "absl/base/macros.h" #include "absl/base/optimization.h" +#include "absl/base/options.h" #include "absl/base/port.h" #include "absl/base/prefetch.h" -#include "absl/container/internal/common.h" +#include "absl/container/internal/common.h" // IWYU pragma: export // for node_handle #include "absl/container/internal/compressed_tuple.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_policy_traits.h" @@ -227,6 +234,7 @@ namespace container_internal { #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS #error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set #elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \ defined(ABSL_HAVE_MEMORY_SANITIZER) // When compiled in sanitizer mode, we add generation integers to the backing // array and iterators. In the backing array, we store the generation between @@ -262,8 +270,21 @@ void SwapAlloc(AllocType& lhs, AllocType& rhs, swap(lhs, rhs); } template -void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/, - std::false_type /* propagate_on_container_swap */) {} +void SwapAlloc(AllocType& lhs, AllocType& rhs, + std::false_type /* propagate_on_container_swap */) { + (void)lhs; + (void)rhs; + assert(lhs == rhs && + "It's UB to call swap with unequal non-propagating allocators."); +} + +template +void CopyAlloc(AllocType& lhs, AllocType& rhs, + std::true_type /* propagate_alloc */) { + lhs = rhs; +} +template +void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {} // The state for a probe sequence. // @@ -361,7 +382,7 @@ uint32_t TrailingZeros(T x) { // width of an abstract bit in the representation. // This mask provides operations for any number of real bits set in an abstract // bit. To add iteration on top of that, implementation must guarantee no more -// than one real bit is set in an abstract bit. +// than the most significant real bit is set in a set abstract bit. template class NonIterableBitMask { public: @@ -388,7 +409,9 @@ class NonIterableBitMask { uint32_t LeadingZeros() const { constexpr int total_significant_bits = SignificantBits << Shift; constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; - return static_cast(countl_zero(mask_ << extra_bits)) >> Shift; + return static_cast( + countl_zero(static_cast(mask_ << extra_bits))) >> + Shift; } T mask_; @@ -418,6 +441,10 @@ class BitMask : public NonIterableBitMask { using const_iterator = BitMask; BitMask& operator++() { + if (Shift == 3) { + constexpr uint64_t msbs = 0x8080808080808080ULL; + this->mask_ &= msbs; + } this->mask_ &= (this->mask_ - 1); return *this; } @@ -590,29 +617,39 @@ struct GroupSse2Impl { } // Returns a bitmask representing the positions of slots that match hash. - BitMask Match(h2_t hash) const { + BitMask Match(h2_t hash) const { auto match = _mm_set1_epi8(static_cast(hash)); - return BitMask( - static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); + BitMask result = BitMask(0); + result = BitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); + return result; } // Returns a bitmask representing the positions of empty slots. 
- NonIterableBitMask MaskEmpty() const { + NonIterableBitMask MaskEmpty() const { #ifdef ABSL_INTERNAL_HAVE_SSSE3 // This only works because ctrl_t::kEmpty is -128. - return NonIterableBitMask( - static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); + return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); #else auto match = _mm_set1_epi8(static_cast(ctrl_t::kEmpty)); - return NonIterableBitMask( - static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); + return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); #endif } + // Returns a bitmask representing the positions of full slots. + // Note: for `is_small()` tables group may contain the "same" slot twice: + // original and mirrored. + BitMask MaskFull() const { + return BitMask( + static_cast(_mm_movemask_epi8(ctrl) ^ 0xffff)); + } + // Returns a bitmask representing the positions of empty or deleted slots. - NonIterableBitMask MaskEmptyOrDeleted() const { + NonIterableBitMask MaskEmptyOrDeleted() const { auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); - return NonIterableBitMask(static_cast( + return NonIterableBitMask(static_cast( _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); } @@ -651,9 +688,8 @@ struct GroupAArch64Impl { BitMask Match(h2_t hash) const { uint8x8_t dup = vdup_n_u8(hash); auto mask = vceq_u8(ctrl, dup); - constexpr uint64_t msbs = 0x8080808080808080ULL; return BitMask( - vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs); + vget_lane_u64(vreinterpret_u64_u8(mask), 0)); } NonIterableBitMask MaskEmpty() const { @@ -665,6 +701,17 @@ struct GroupAArch64Impl { return NonIterableBitMask(mask); } + // Returns a bitmask representing the positions of full slots. + // Note: for `is_small()` tables group may contain the "same" slot twice: + // original and mirrored. + BitMask MaskFull() const { + uint64_t mask = vget_lane_u64( + vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl), + vdup_n_s8(static_cast(0)))), + 0); + return BitMask(mask); + } + NonIterableBitMask MaskEmptyOrDeleted() const { uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( @@ -729,13 +776,21 @@ struct GroupPortableImpl { NonIterableBitMask MaskEmpty() const { constexpr uint64_t msbs = 0x8080808080808080ULL; - return NonIterableBitMask((ctrl & (~ctrl << 6)) & + return NonIterableBitMask((ctrl & ~(ctrl << 6)) & msbs); } + // Returns a bitmask representing the positions of full slots. + // Note: for `is_small()` tables group may contain the "same" slot twice: + // original and mirrored. + BitMask MaskFull() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask((ctrl ^ msbs) & msbs); + } + NonIterableBitMask MaskEmptyOrDeleted() const { constexpr uint64_t msbs = 0x8080808080808080ULL; - return NonIterableBitMask((ctrl & (~ctrl << 7)) & + return NonIterableBitMask((ctrl & ~(ctrl << 7)) & msbs); } @@ -760,10 +815,21 @@ struct GroupPortableImpl { #ifdef ABSL_INTERNAL_HAVE_SSE2 using Group = GroupSse2Impl; +using GroupEmptyOrDeleted = GroupSse2Impl; #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) using Group = GroupAArch64Impl; +// For Aarch64, we use the portable implementation for counting and masking +// empty or deleted group elements. This is to avoid the latency of moving +// between data GPRs and Neon registers when it does not provide a benefit. +// Using Neon is profitable when we call Match(), but is not when we don't, +// which is the case when we do *EmptyOrDeleted operations. 
It is difficult to +// make a similar approach beneficial on other architectures such as x86 since +// they have much lower GPR <-> vector register transfer latency and 16-wide +// Groups. +using GroupEmptyOrDeleted = GroupPortableImpl; #else using Group = GroupPortableImpl; +using GroupEmptyOrDeleted = GroupPortableImpl; #endif // When there is an insertion with no reserved growth, we rehash with @@ -802,15 +868,19 @@ class CommonFieldsGenerationInfoEnabled { // whenever reserved_growth_ is zero. bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl, size_t capacity) const; + // Similar to above, except that we don't depend on reserved_growth_. + bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl, + size_t capacity) const; void maybe_increment_generation_on_insert() { if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0; if (reserved_growth_ > 0) { if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut; } else { - *generation_ = NextGeneration(*generation_); + increment_generation(); } } + void increment_generation() { *generation_ = NextGeneration(*generation_); } void reset_reserved_growth(size_t reservation, size_t size) { reserved_growth_ = reservation - size; } @@ -856,7 +926,11 @@ class CommonFieldsGenerationInfoDisabled { bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const { return false; } + bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const { + return false; + } void maybe_increment_generation_on_insert() {} + void increment_generation() {} void reset_reserved_growth(size_t, size_t) {} size_t reserved_growth() const { return 0; } void set_reserved_growth(size_t) {} @@ -909,9 +983,11 @@ using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled; // A valid capacity is a non-zero integer `2^m - 1`. inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } -// Computes the offset from the start of the backing allocation of the control -// bytes. growth_left is stored at the beginning of the backing array. -inline size_t ControlOffset() { return sizeof(size_t); } +// Computes the offset from the start of the backing allocation of control. +// infoz and growth_left are stored at the beginning of the backing array. +inline size_t ControlOffset(bool has_infoz) { + return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(size_t); +} // Returns the number of "cloned control bytes". // @@ -922,24 +998,26 @@ constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } // Given the capacity of a table, computes the offset (from the start of the // backing allocation) of the generation counter (if it exists). -inline size_t GenerationOffset(size_t capacity) { +inline size_t GenerationOffset(size_t capacity, bool has_infoz) { assert(IsValidCapacity(capacity)); const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); - return ControlOffset() + num_control_bytes; + return ControlOffset(has_infoz) + num_control_bytes; } // Given the capacity of a table, computes the offset (from the start of the // backing allocation) at which the slots begin. 
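Stepping back to the GroupPortableImpl hunk above: the new MaskFull and the reworked MaskEmpty/MaskEmptyOrDeleted are single-word bit tricks over eight control bytes packed into a uint64_t. A stand-alone demonstration with toy byte values, relying only on the encodings this file already states (kEmpty = -128 = 0x80, kDeleted = -2 = 0xFE, kSentinel = -1 = 0xFF, full bytes have the high bit clear):

#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t msbs = 0x8080808080808080ULL;
  // Little-endian bytes: full(0x12), empty, deleted, sentinel, full(0x34),
  // empty, empty, full(0x56).
  const uint8_t bytes[8] = {0x12, 0x80, 0xFE, 0xFF, 0x34, 0x80, 0x80, 0x56};
  uint64_t ctrl = 0;
  for (int i = 0; i < 8; ++i) ctrl |= uint64_t{bytes[i]} << (8 * i);

  // Shifting left by 6 moves bit 1 into the MSB slot of each byte; kEmpty is
  // the only special byte with bit 1 clear, so MSB set <=> byte is empty.
  const uint64_t mask_empty = (ctrl & ~(ctrl << 6)) & msbs;
  // Full bytes are exactly the bytes whose MSB is clear.
  const uint64_t mask_full = (ctrl ^ msbs) & msbs;
  // Shifting left by 7 moves bit 0 up; kSentinel is the only special byte
  // with bit 0 set, so this keeps empty and deleted but drops the sentinel.
  const uint64_t mask_empty_or_deleted = (ctrl & ~(ctrl << 7)) & msbs;

  std::printf("empty:            %016llx\n",
              static_cast<unsigned long long>(mask_empty));
  std::printf("full:             %016llx\n",
              static_cast<unsigned long long>(mask_full));
  std::printf("empty_or_deleted: %016llx\n",
              static_cast<unsigned long long>(mask_empty_or_deleted));
}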
-inline size_t SlotOffset(size_t capacity, size_t slot_align) { +inline size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) { assert(IsValidCapacity(capacity)); - return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) & + return (GenerationOffset(capacity, has_infoz) + NumGenerationBytes() + + slot_align - 1) & (~slot_align + 1); } // Given the capacity of a table, computes the total size of the backing // array. -inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) { - return SlotOffset(capacity, slot_align) + capacity * slot_size; +inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align, + bool has_infoz) { + return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size; } // CommonFields hold the fields in raw_hash_set that do not depend @@ -954,28 +1032,15 @@ class CommonFields : public CommonFieldsGenerationInfo { CommonFields& operator=(const CommonFields&) = delete; // Movable - CommonFields(CommonFields&& that) - : CommonFieldsGenerationInfo( - std::move(static_cast(that))), - // Explicitly copying fields into "this" and then resetting "that" - // fields generates less code then calling absl::exchange per field. - control_(that.control()), - slots_(that.slot_array()), - capacity_(that.capacity()), - compressed_tuple_(that.size(), std::move(that.infoz())) { - that.set_control(EmptyGroup()); - that.set_slots(nullptr); - that.set_capacity(0); - that.set_size(0); - } + CommonFields(CommonFields&& that) = default; CommonFields& operator=(CommonFields&&) = default; ctrl_t* control() const { return control_; } void set_control(ctrl_t* c) { control_ = c; } void* backing_array_start() const { - // growth_left is stored before control bytes. + // growth_left (and maybe infoz) is stored before control bytes. assert(reinterpret_cast(control()) % alignof(size_t) == 0); - return control() - sizeof(size_t); + return control() - ControlOffset(has_infoz()); } // Note: we can't use slots() because Qt defines "slots" as a macro. @@ -983,8 +1048,18 @@ class CommonFields : public CommonFieldsGenerationInfo { void set_slots(void* s) { slots_ = s; } // The number of filled slots. - size_t size() const { return compressed_tuple_.template get<0>(); } - void set_size(size_t s) { compressed_tuple_.template get<0>() = s; } + size_t size() const { return size_ >> HasInfozShift(); } + void set_size(size_t s) { + size_ = (s << HasInfozShift()) | (size_ & HasInfozMask()); + } + void increment_size() { + assert(size() < capacity()); + size_ += size_t{1} << HasInfozShift(); + } + void decrement_size() { + assert(size() > 0); + size_ -= size_t{1} << HasInfozShift(); + } // The total number of available slots. size_t capacity() const { return capacity_; } @@ -996,28 +1071,52 @@ class CommonFields : public CommonFieldsGenerationInfo { // The number of slots we can still fill without needing to rehash. // This is stored in the heap allocation before the control bytes. 
size_t growth_left() const { - return *reinterpret_cast(backing_array_start()); + const size_t* gl_ptr = reinterpret_cast(control()) - 1; + assert(reinterpret_cast(gl_ptr) % alignof(size_t) == 0); + return *gl_ptr; } void set_growth_left(size_t gl) { - *reinterpret_cast(backing_array_start()) = gl; + size_t* gl_ptr = reinterpret_cast(control()) - 1; + assert(reinterpret_cast(gl_ptr) % alignof(size_t) == 0); + *gl_ptr = gl; } - HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); } - const HashtablezInfoHandle& infoz() const { - return compressed_tuple_.template get<1>(); + bool has_infoz() const { + return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0); + } + void set_has_infoz(bool has_infoz) { + size_ = (size() << HasInfozShift()) | static_cast(has_infoz); + } + + HashtablezInfoHandle infoz() { + return has_infoz() + ? *reinterpret_cast(backing_array_start()) + : HashtablezInfoHandle(); + } + void set_infoz(HashtablezInfoHandle infoz) { + assert(has_infoz()); + *reinterpret_cast(backing_array_start()) = infoz; } bool should_rehash_for_bug_detection_on_insert() const { return CommonFieldsGenerationInfo:: should_rehash_for_bug_detection_on_insert(control(), capacity()); } + bool should_rehash_for_bug_detection_on_move() const { + return CommonFieldsGenerationInfo:: + should_rehash_for_bug_detection_on_move(control(), capacity()); + } + void maybe_increment_generation_on_move() { + if (capacity() == 0) return; + increment_generation(); + } void reset_reserved_growth(size_t reservation) { CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size()); } // The size of the backing array allocation. size_t alloc_size(size_t slot_size, size_t slot_align) const { - return AllocSize(capacity(), slot_size, slot_align); + return AllocSize(capacity(), slot_size, slot_align, has_infoz()); } // Returns the number of control bytes set to kDeleted. For testing only. @@ -1027,9 +1126,14 @@ class CommonFields : public CommonFieldsGenerationInfo { } private: - // TODO(b/259599413): Investigate removing some of these fields: + // We store the has_infoz bit in the lowest bit of size_. + static constexpr size_t HasInfozShift() { return 1; } + static constexpr size_t HasInfozMask() { + return (size_t{1} << HasInfozShift()) - 1; + } + + // TODO(b/182800944): Investigate removing some of these fields: // - control/slots can be derived from each other // - we can use 6 bits for capacity since it's always a power of two minus 1 // The control bytes (and, also, a pointer near to the base of the backing // array). @@ -1044,12 +1148,16 @@ class CommonFields : public CommonFieldsGenerationInfo { // `control`. May be null for empty tables. void* slots_ = nullptr; + // The number of slots in the backing array. This is always 2^N-1 for an + // integer N. NOTE: we tried experimenting with compressing the capacity and + // storing it together with size_: (a) using 6 bits to store the corresponding + // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of + // size_ and storing size in the low bits. Both of these experiments were + // regressions, presumably because we need capacity to do find operations. size_t capacity_ = 0; - // Bundle together size and HashtablezInfoHandle to ensure EBO for - // HashtablezInfoHandle when sampling is turned off. - absl::container_internal::CompressedTuple - compressed_tuple_{0u, HashtablezInfoHandle{}}; + // The size, with the low bit recording whether we have infoz.
+ size_t size_ = 0; }; template @@ -1139,35 +1247,39 @@ inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation, const GenerationType* generation_ptr, const char* operation) { if (!SwisstableDebugEnabled()) return; - if (ctrl == nullptr) { - ABSL_INTERNAL_LOG(FATAL, - std::string(operation) + " called on end() iterator."); - } - if (ctrl == EmptyGroup()) { - ABSL_INTERNAL_LOG(FATAL, std::string(operation) + - " called on default-constructed iterator."); + // `SwisstableDebugEnabled()` is also true for release builds with hardening + // enabled. To minimize their impact in those builds: + // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout + // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve + // the chances that the hot paths will be inlined. + if (ABSL_PREDICT_FALSE(ctrl == nullptr)) { + ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation); + } + if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) { + ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.", + operation); } if (SwisstableGenerationsEnabled()) { - if (generation != *generation_ptr) { - ABSL_INTERNAL_LOG(FATAL, - std::string(operation) + - " called on invalid iterator. The table could have " - "rehashed since this iterator was initialized."); + if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) { + ABSL_RAW_LOG(FATAL, + "%s called on invalid iterator. The table could have " + "rehashed or moved since this iterator was initialized.", + operation); } - if (!IsFull(*ctrl)) { - ABSL_INTERNAL_LOG( + if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) { + ABSL_RAW_LOG( FATAL, - std::string(operation) + - " called on invalid iterator. The element was likely erased."); + "%s called on invalid iterator. The element was likely erased.", + operation); } } else { - if (!IsFull(*ctrl)) { - ABSL_INTERNAL_LOG( + if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) { + ABSL_RAW_LOG( FATAL, - std::string(operation) + - " called on invalid iterator. The element might have been erased " - "or the table might have rehashed. Consider running with " - "--config=asan to diagnose rehashing issues."); + "%s called on invalid iterator. The element might have been erased " + "or the table might have rehashed. Consider running with " + "--config=asan to diagnose rehashing issues.", + operation); } } } @@ -1180,13 +1292,13 @@ inline void AssertIsValidForComparison(const ctrl_t* ctrl, const bool ctrl_is_valid_for_comparison = ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl); if (SwisstableGenerationsEnabled()) { - if (generation != *generation_ptr) { - ABSL_INTERNAL_LOG(FATAL, - "Invalid iterator comparison. The table could have " - "rehashed since this iterator was initialized."); + if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) { + ABSL_RAW_LOG(FATAL, + "Invalid iterator comparison. The table could have rehashed " + "or moved since this iterator was initialized."); } - if (!ctrl_is_valid_for_comparison) { - ABSL_INTERNAL_LOG( + if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) { + ABSL_RAW_LOG( FATAL, "Invalid iterator comparison. The element was likely erased."); } } else { @@ -1226,10 +1338,15 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b, const GenerationType* generation_ptr_a, const GenerationType* generation_ptr_b) { if (!SwisstableDebugEnabled()) return; + // `SwisstableDebugEnabled()` is also true for release builds with hardening + // enabled. 
To minimize their impact in those builds: + // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout + // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve + // the chances that the hot paths will be inlined. const bool a_is_default = ctrl_a == EmptyGroup(); const bool b_is_default = ctrl_b == EmptyGroup(); - if (a_is_default != b_is_default) { - ABSL_INTERNAL_LOG( + if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) { + ABSL_RAW_LOG( FATAL, "Invalid iterator comparison. Comparing default-constructed iterator " "with non-default-constructed iterator."); @@ -1237,36 +1354,36 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b, if (a_is_default && b_is_default) return; if (SwisstableGenerationsEnabled()) { - if (generation_ptr_a == generation_ptr_b) return; + if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return; const bool a_is_empty = IsEmptyGeneration(generation_ptr_a); const bool b_is_empty = IsEmptyGeneration(generation_ptr_b); if (a_is_empty != b_is_empty) { - ABSL_INTERNAL_LOG(FATAL, - "Invalid iterator comparison. Comparing iterator from " - "a non-empty hashtable with an iterator from an empty " - "hashtable."); + ABSL_RAW_LOG(FATAL, + "Invalid iterator comparison. Comparing iterator from a " + "non-empty hashtable with an iterator from an empty " + "hashtable."); } if (a_is_empty && b_is_empty) { - ABSL_INTERNAL_LOG(FATAL, - "Invalid iterator comparison. Comparing iterators from " - "different empty hashtables."); + ABSL_RAW_LOG(FATAL, + "Invalid iterator comparison. Comparing iterators from " + "different empty hashtables."); } const bool a_is_end = ctrl_a == nullptr; const bool b_is_end = ctrl_b == nullptr; if (a_is_end || b_is_end) { - ABSL_INTERNAL_LOG(FATAL, - "Invalid iterator comparison. Comparing iterator with " - "an end() iterator from a different hashtable."); + ABSL_RAW_LOG(FATAL, + "Invalid iterator comparison. Comparing iterator with an " + "end() iterator from a different hashtable."); } - ABSL_INTERNAL_LOG(FATAL, - "Invalid iterator comparison. Comparing non-end() " - "iterators from different hashtables."); + ABSL_RAW_LOG(FATAL, + "Invalid iterator comparison. Comparing non-end() iterators " + "from different hashtables."); } else { ABSL_HARDENING_ASSERT( AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) && "Invalid iterator comparison. The iterators may be from different " - "containers or the container might have rehashed. Consider running " - "with --config=asan to diagnose rehashing issues."); + "containers or the container might have rehashed or moved. Consider " + "running with --config=asan to diagnose issues."); } } @@ -1289,6 +1406,12 @@ struct FindInfo { // `ShouldInsertBackwards()` for small tables. inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; } +// Whether a table fits entirely into a probing group. +// Arbitrary order of elements in such tables is correct. +inline bool is_single_group(size_t capacity) { + return capacity <= Group::kWidth; +} + // Begins a probing operation on `common.control`, using `hash`. 
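One more note on the CommonFields hunks above before the probing code resumes: has_infoz now rides in the low bit of size_, so every size accessor shifts by HasInfozShift(). A toy model of that packing (a simplified stand-in, not the real class):

#include <cassert>
#include <cstddef>

class PackedSize {
 public:
  std::size_t size() const { return size_ >> 1; }  // HasInfozShift() == 1
  bool has_infoz() const { return (size_ & 1) != 0; }
  void set_size(std::size_t s) { size_ = (s << 1) | (size_ & 1); }
  void set_has_infoz(bool h) {
    size_ = (size() << 1) | static_cast<std::size_t>(h);
  }
  // Adding 1 << shift leaves the flag bit untouched.
  void increment_size() { size_ += std::size_t{1} << 1; }
  void decrement_size() { size_ -= std::size_t{1} << 1; }

 private:
  std::size_t size_ = 0;
};

int main() {
  PackedSize p;
  p.set_has_infoz(true);
  p.increment_size();
  p.increment_size();
  assert(p.size() == 2 && p.has_infoz());
  p.decrement_size();
  assert(p.size() == 1 && p.has_infoz());
}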
inline probe_seq probe(const ctrl_t* ctrl, const size_t capacity, size_t hash) { @@ -1310,7 +1433,7 @@ inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) { auto seq = probe(common, hash); const ctrl_t* ctrl = common.control(); while (true) { - Group g{ctrl + seq.offset()}; + GroupEmptyOrDeleted g{ctrl + seq.offset()}; auto mask = g.MaskEmptyOrDeleted(); if (mask) { #if !defined(NDEBUG) @@ -1351,7 +1474,6 @@ inline void ResetCtrl(CommonFields& common, size_t slot_size) { capacity + 1 + NumClonedBytes()); ctrl[capacity] = ctrl_t::kSentinel; SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity); - ResetGrowthLeft(common); } // Sets `ctrl[i]` to `h`. @@ -1386,38 +1508,263 @@ constexpr size_t BackingArrayAlignment(size_t align_of_slot) { return (std::max)(align_of_slot, alignof(size_t)); } -template -ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) { - assert(c.capacity()); - // Folks with custom allocators often make unwarranted assumptions about the - // behavior of their classes vis-a-vis trivial destructability and what - // calls they will or won't make. Avoid sampling for people with custom - // allocators to get us out of this mess. This is not a hard guarantee but - // a workaround while we plan the exact guarantee we want to provide. - const size_t sample_size = - (std::is_same>::value && - c.slot_array() == nullptr) - ? SizeOfSlot - : 0; - - const size_t cap = c.capacity(); - const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot); - // growth_left (which is a size_t) is stored with the backing array. - char* mem = static_cast( - Allocate(&alloc, alloc_size)); - const GenerationType old_generation = c.generation(); - c.set_generation_ptr( - reinterpret_cast(mem + GenerationOffset(cap))); - c.set_generation(NextGeneration(old_generation)); - c.set_control(reinterpret_cast(mem + ControlOffset())); - c.set_slots(mem + SlotOffset(cap, AlignOfSlot)); - ResetCtrl(c, SizeOfSlot); - if (sample_size) { - c.infoz() = Sample(sample_size); - } - c.infoz().RecordStorageChanged(c.size(), cap); +// Returns the address of the ith slot in slots where each slot occupies +// slot_size. +inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) { + return reinterpret_cast(reinterpret_cast(slot_array) + + (slot * slot_size)); } +// Helper class to perform resize of the hash set. +// +// It contains special optimizations for small group resizes. +// See GrowIntoSingleGroupShuffleControlBytes for details. +class HashSetResizeHelper { + public: + explicit HashSetResizeHelper(CommonFields& c) + : old_ctrl_(c.control()), + old_capacity_(c.capacity()), + had_infoz_(c.has_infoz()) {} + + // An optimized, small-group version of `find_first_non_full`, applicable + // only right after calling `raw_hash_set::resize`. + // It makes the implicit assumption that `resize` will call + // `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`. + // Falls back to `find_first_non_full` in case of big groups, so it is + // safe to use after `rehash_and_grow_if_necessary`. + static FindInfo FindFirstNonFullAfterResize(const CommonFields& c, + size_t old_capacity, + size_t hash) { + if (!IsGrowingIntoSingleGroupApplicable(old_capacity, c.capacity())) { + return find_first_non_full(c, hash); + } + // Find a location for the new element non-deterministically. + // Note that any position is correct. + // It will be located at `half_old_capacity` or one of the other + // empty slots with approximately 50% probability each.
+ size_t offset = probe(c, hash).offset(); + + // Note that we intentionally use unsigned int underflow. + if (offset - (old_capacity + 1) >= old_capacity) { + // Offset falls on kSentinel or into the mostly occupied first half. + offset = old_capacity / 2; + } + assert(IsEmpty(c.control()[offset])); + return FindInfo{offset, 0}; + } + + ctrl_t* old_ctrl() const { return old_ctrl_; } + size_t old_capacity() const { return old_capacity_; } + + // Allocates a backing array for the hashtable. + // Reads `capacity` and updates all other fields based on the result of + // the allocation. + // + // It may also do the following actions: + // 1. initialize control bytes + // 2. initialize slots + // 3. deallocate old slots. + // + // We are bundling a lot of functionality + // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code + // duplication in raw_hash_set<>::resize. + // + // `c.capacity()` must be nonzero. + // POSTCONDITIONS: + // 1. CommonFields is initialized. + // + // if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy + // Both control bytes and slots are fully initialized. + // old_slots are deallocated. + // infoz.RecordRehash is called. + // + // if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy + // Control bytes are fully initialized. + // infoz.RecordRehash is called. + // GrowSizeIntoSingleGroup must be called to finish slots initialization. + // + // if !IsGrowingIntoSingleGroupApplicable + // Control bytes are initialized to empty table via ResetCtrl. + // raw_hash_set<>::resize must insert elements regularly. + // infoz.RecordRehash is called if old_capacity == 0. + // + // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation. + template + ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, void* old_slots, + Alloc alloc) { + assert(c.capacity()); + // Folks with custom allocators often make unwarranted assumptions about the + // behavior of their classes vis-a-vis trivial destructability and what + // calls they will or won't make. Avoid sampling for people with custom + // allocators to get us out of this mess. This is not a hard guarantee but + // a workaround while we plan the exact guarantee we want to provide. + const size_t sample_size = + (std::is_same>::value && + c.slot_array() == nullptr) + ? SizeOfSlot + : 0; + HashtablezInfoHandle infoz = + sample_size > 0 ?
Sample(sample_size) : c.infoz(); + + const bool has_infoz = infoz.IsSampled(); + const size_t cap = c.capacity(); + const size_t alloc_size = + AllocSize(cap, SizeOfSlot, AlignOfSlot, has_infoz); + char* mem = static_cast( + Allocate(&alloc, alloc_size)); + const GenerationType old_generation = c.generation(); + c.set_generation_ptr(reinterpret_cast( + mem + GenerationOffset(cap, has_infoz))); + c.set_generation(NextGeneration(old_generation)); + c.set_control(reinterpret_cast(mem + ControlOffset(has_infoz))); + c.set_slots(mem + SlotOffset(cap, AlignOfSlot, has_infoz)); + ResetGrowthLeft(c); + + const bool grow_single_group = + IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()); + if (old_capacity_ != 0 && grow_single_group) { + if (TransferUsesMemcpy) { + GrowSizeIntoSingleGroupTransferable(c, old_slots, SizeOfSlot); + DeallocateOld(alloc, SizeOfSlot, old_slots); + } else { + GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity()); + } + } else { + ResetCtrl(c, SizeOfSlot); + } + + c.set_has_infoz(has_infoz); + if (has_infoz) { + infoz.RecordStorageChanged(c.size(), cap); + if (grow_single_group || old_capacity_ == 0) { + infoz.RecordRehash(0); + } + c.set_infoz(infoz); + } + return grow_single_group; + } + + // Relocates slots into new single group consistent with + // GrowIntoSingleGroupShuffleControlBytes. + // + // PRECONDITIONS: + // 1. GrowIntoSingleGroupShuffleControlBytes was already called. + template + void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref, + typename PolicyTraits::slot_type* old_slots) { + assert(old_capacity_ < Group::kWidth / 2); + assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity())); + using slot_type = typename PolicyTraits::slot_type; + assert(is_single_group(c.capacity())); + + auto* new_slots = reinterpret_cast(c.slot_array()); + + size_t shuffle_bit = old_capacity_ / 2 + 1; + for (size_t i = 0; i < old_capacity_; ++i) { + if (IsFull(old_ctrl_[i])) { + size_t new_i = i ^ shuffle_bit; + SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type)); + PolicyTraits::transfer(&alloc_ref, new_slots + new_i, old_slots + i); + } + } + PoisonSingleGroupEmptySlots(c, sizeof(slot_type)); + } + + // Deallocates old backing array. + template + void DeallocateOld(CharAlloc alloc_ref, size_t slot_size, void* old_slots) { + SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_); + Deallocate( + &alloc_ref, old_ctrl_ - ControlOffset(had_infoz_), + AllocSize(old_capacity_, slot_size, AlignOfSlot, had_infoz_)); + } + + private: + // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing. + static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity, + size_t new_capacity) { + // NOTE: `old_capacity < new_capacity` is required in order to have + // `old_capacity < Group::kWidth / 2`, which permits fast 8-byte copies. + return is_single_group(new_capacity) && old_capacity < new_capacity; + } + + // Relocates control bytes and slots into new single group for + // transferable objects. + // Must be called only if IsGrowingIntoSingleGroupApplicable returned true. + void GrowSizeIntoSingleGroupTransferable(CommonFields& c, void* old_slots, + size_t slot_size); + + // Shuffles control bits deterministically to the next capacity. + // + // PRECONDITIONS: + // 1. new_ctrl is allocated for new_capacity, + // but not initialized. + // 2. new_capacity is a single group.
+
+ private:
+  // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing.
+  static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity,
+                                                 size_t new_capacity) {
+    // NOTE: `old_capacity < new_capacity` also guarantees
+    // `old_capacity < Group::kWidth / 2`, which makes the faster 8-byte
+    // copies possible.
+    return is_single_group(new_capacity) && old_capacity < new_capacity;
+  }
+
+  // Relocates control bytes and slots into a new single group for
+  // transferable objects.
+  // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
+  void GrowSizeIntoSingleGroupTransferable(CommonFields& c, void* old_slots,
+                                           size_t slot_size);
+
+  // Shuffles control bytes deterministically to the next capacity.
+  // Returns the offset for a newly added element with the given hash.
+  //
+  // PRECONDITIONS:
+  // 1. new_ctrl is allocated for new_capacity,
+  //    but not initialized.
+  // 2. new_capacity is a single group.
+  //
+  // All elements are transferred into the first `old_capacity + 1` positions
+  // of the new_ctrl. Elements are rotated by `old_capacity_ / 2 + 1` positions
+  // in order to change their order. Although the rotation itself is
+  // deterministic, the position of a newly added element is based on `H1` and
+  // is therefore not deterministic.
+  //
+  // Examples:
+  // S = kSentinel, E = kEmpty
+  //
+  // old_ctrl = SEEEEEEEE...
+  // new_ctrl = ESEEEEEEE...
+  //
+  // old_ctrl = 0SEEEEEEE...
+  // new_ctrl = E0ESE0EEE...
+  //
+  // old_ctrl = 012S012EEEEEEEEE...
+  // new_ctrl = 2E01EEES2E01EEE...
+  //
+  // old_ctrl = 0123456S0123456EEEEEEEEEEE...
+  // new_ctrl = 456E0123EEEEEES456E0123EEE...
+  void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
+                                              size_t new_capacity) const;
+
+  // Shuffles trivially transferable slots in a way consistent with
+  // GrowIntoSingleGroupShuffleControlBytes.
+  //
+  // PRECONDITIONS:
+  // 1. old_capacity must be non-zero.
+  // 2. new_ctrl is fully initialized using
+  //    GrowIntoSingleGroupShuffleControlBytes.
+  // 3. new_slots is allocated and *not* poisoned.
+  //
+  // POSTCONDITIONS:
+  // 1. new_slots are transferred from old_slots consistent with
+  //    GrowIntoSingleGroupShuffleControlBytes.
+  // 2. Empty new_slots are *not* poisoned.
+  void GrowIntoSingleGroupShuffleTransferableSlots(void* old_slots,
+                                                   void* new_slots,
+                                                   size_t slot_size) const;
+
+  // Poisons the empty slots that were transferred using the deterministic
+  // algorithm described above.
+  // PRECONDITIONS:
+  // 1. new_ctrl is fully initialized using
+  //    GrowIntoSingleGroupShuffleControlBytes.
+  // 2. new_slots is fully initialized consistent with
+  //    GrowIntoSingleGroupShuffleControlBytes.
+  void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const {
+    // Poison the non-full slots.
+    for (size_t i = 0; i < c.capacity(); ++i) {
+      if (!IsFull(c.control()[i])) {
+        SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
+                                    slot_size);
+      }
+    }
+  }
+
+  ctrl_t* old_ctrl_;
+  size_t old_capacity_;
+  bool had_infoz_;
+};
+
 // PolicyFunctions bundles together some information for a particular
 // raw_hash_set instantiation. This information is passed to
 // type-erased functions that want to do small amounts of type-specific
 // work.
@@ -1442,7 +1789,7 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
                        bool reuse);
 
 // Type-erased version of raw_hash_set::erase_meta_only.
-void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
+void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
 
 // Function to place in PolicyFunctions::dealloc for raw_hash_sets
 // that are using std::allocator. This allows us to share the same
@@ -1456,6 +1803,7 @@ ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
                                 policy.slot_size * common.capacity());
 
   std::allocator<char> alloc;
+  common.infoz().Unregister();
   Deallocate<BackingArrayAlignment(AlignOfSlot)>(
       &alloc, common.backing_array_start(),
       common.alloc_size(policy.slot_size, AlignOfSlot));
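`DeallocateStandard` and the `PolicyFunctions` table it plugs into let every instantiation that uses `std::allocator` share one out-of-line teardown routine per slot alignment; only non-standard allocators pay for a type-specific `dealloc_fn` (defined near the end of `raw_hash_set`). A minimal sketch of that function-pointer erasure, with hypothetical names:

#include <cstddef>
#include <new>

// One shared, type-erased routine per alignment instead of one per table type.
struct PolicyFunctionsDemo {
  std::size_t slot_size;
  void (*dealloc)(void* backing_array, const PolicyFunctionsDemo&);
};

template <std::size_t AlignOfSlot>
void DeallocateStandardDemo(void* backing_array, const PolicyFunctionsDemo&) {
  ::operator delete(backing_array, std::align_val_t{AlignOfSlot});
}

// Every table with 8-byte, 8-aligned slots can point at this one instance.
constexpr PolicyFunctionsDemo kDemoPolicy = {8, &DeallocateStandardDemo<8>};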
@@ -1534,6 +1882,11 @@ class raw_hash_set {
   using AllocTraits = absl::allocator_traits<allocator_type>;
   using SlotAlloc = typename absl::allocator_traits<
       allocator_type>::template rebind_alloc<slot_type>;
+  // People are often sloppy with the exact type of their allocator (sometimes
+  // it has an extra const or is missing the pair, but rebinds made it work
+  // anyway).
+  using CharAlloc =
+      typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
   using SlotAllocTraits = typename absl::allocator_traits<
       allocator_type>::template rebind_traits<slot_type>;
 
@@ -1590,7 +1943,7 @@ class raw_hash_set {
     // PRECONDITION: not an end() iterator.
     reference operator*() const {
       AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
-      return PolicyTraits::element(slot_);
+      return unchecked_deref();
     }
 
     // PRECONDITION: not an end() iterator.
@@ -1645,13 +1998,17 @@
     // If a sentinel is reached, we null `ctrl_` out instead.
     void skip_empty_or_deleted() {
       while (IsEmptyOrDeleted(*ctrl_)) {
-        uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
+        uint32_t shift =
+            GroupEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
         ctrl_ += shift;
         slot_ += shift;
       }
       if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
     }
 
+    ctrl_t* control() const { return ctrl_; }
+    slot_type* slot() const { return slot_; }
+
     // We use EmptyGroup() for default-constructed iterators so that they can
     // be distinguished from end iterators, which have nullptr ctrl_.
     ctrl_t* ctrl_ = EmptyGroup();
@@ -1660,10 +2017,23 @@
     union {
       slot_type* slot_;
     };
+
+    // An equality check which skips ABSL Hardening iterator invalidation
+    // checks.
+    // Should be used when the lifetimes of the iterators are well-enough
+    // understood to prove that they cannot be invalid.
+    bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); }
+
+    // Dereferences the iterator without ABSL Hardening iterator invalidation
+    // checks.
+    reference unchecked_deref() const { return PolicyTraits::element(slot_); }
   };
 
   class const_iterator {
     friend class raw_hash_set;
+    template <class Container, typename Enabler>
+    friend struct absl::container_internal::hashtable_debug_internal::
+        HashtableDebugAccess;
 
    public:
     using iterator_category = typename iterator::iterator_category;
@@ -1697,8 +2067,14 @@
                    const GenerationType* gen)
         : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
     }
+    ctrl_t* control() const { return inner_.control(); }
+    slot_type* slot() const { return inner_.slot(); }
 
     iterator inner_;
+
+    bool unchecked_equals(const const_iterator& b) {
+      return inner_.unchecked_equals(b.inner_);
+    }
   };
 
   using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
@@ -1717,8 +2093,7 @@
       const allocator_type& alloc = allocator_type())
       : settings_(CommonFields{}, hash, eq, alloc) {
     if (bucket_count) {
-      common().set_capacity(NormalizeCapacity(bucket_count));
-      initialize_slots();
+      resize(NormalizeCapacity(bucket_count));
    }
   }
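The `CharAlloc` rebind above means the table never allocates through the user's allocator type directly: whatever value type (or stray `const`) the user wrote, the backing array is obtained from the `char`-rebound allocator. A compile-time illustration:

#include <memory>
#include <type_traits>
#include <utility>

template <class A>
using CharAllocDemo =
    typename std::allocator_traits<A>::template rebind_alloc<char>;

// An allocator declared for the "wrong" value type rebinds to the same byte
// allocator the table actually uses.
static_assert(
    std::is_same<CharAllocDemo<std::allocator<std::pair<const int, int>>>,
                 std::allocator<char>>::value,
    "rebinding normalizes the allocator's value type");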
@@ -1843,28 +2218,35 @@
       :  // Hash, equality and allocator are copied instead of moved because
          // `that` must be left valid. If Hash is std::function<Key>, moving it
          // would create a nullptr functor that cannot be called.
-        settings_(absl::exchange(that.common(), CommonFields{}),
-                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
+         // TODO(b/296061262): move instead of copying hash/eq/alloc.
+         // Note: we avoid using exchange for better generated code.
+        settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(),
+                  that.alloc_ref()) {
+    that.common() = CommonFields{};
+    maybe_increment_generation_or_rehash_on_move();
+  }
 
   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
       : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
     if (a == that.alloc_ref()) {
       std::swap(common(), that.common());
+      maybe_increment_generation_or_rehash_on_move();
     } else {
-      reserve(that.size());
-      // Note: this will copy elements of dense_set and unordered_set instead
-      // of moving them. This can be fixed if it ever becomes an issue.
-      for (auto& elem : that) insert(std::move(elem));
+      move_elements_allocs_unequal(std::move(that));
     }
   }
 
   raw_hash_set& operator=(const raw_hash_set& that) {
-    raw_hash_set tmp(that,
-                     AllocTraits::propagate_on_container_copy_assignment::value
-                         ? that.alloc_ref()
-                         : alloc_ref());
-    swap(tmp);
-    return *this;
+    if (ABSL_PREDICT_FALSE(this == &that)) return *this;
+    constexpr bool propagate_alloc =
+        AllocTraits::propagate_on_container_copy_assignment::value;
+    // TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
+    // is an exact match for that.size(). If this->capacity() is too big, then
+    // it would make iteration very slow to reuse the allocation. Maybe we can
+    // do the same heuristic as clear() and reuse if it's small enough.
+    raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
+    // NOLINTNEXTLINE: not returning *this for performance.
+    return assign_impl<propagate_alloc>(std::move(tmp));
   }
 
   raw_hash_set& operator=(raw_hash_set&& that) noexcept(
@@ -1879,19 +2261,7 @@
         typename AllocTraits::propagate_on_container_move_assignment());
   }
 
-  ~raw_hash_set() {
-    const size_t cap = capacity();
-    if (!cap) return;
-    destroy_slots();
-
-    // Unpoison before returning the memory to the allocator.
-    SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
-    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
-        &alloc_ref(), common().backing_array_start(),
-        AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
-
-    infoz().Unregister();
-  }
+  ~raw_hash_set() { destructor_impl(); }
 
   iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto it = iterator_at(0);
@@ -1937,17 +2307,6 @@
     common().set_reservation_size(0);
   }
 
-  inline void destroy_slots() {
-    const size_t cap = capacity();
-    const ctrl_t* ctrl = control();
-    slot_type* slot = slot_array();
-    for (size_t i = 0; i != cap; ++i) {
-      if (IsFull(ctrl[i])) {
-        PolicyTraits::destroy(&alloc_ref(), slot + i);
-      }
-    }
-  }
-
   // This overload kicks in when the argument is an rvalue of insertable and
   // decomposable type other than init_type.
   //
@@ -2075,7 +2434,7 @@
     alignas(slot_type) unsigned char raw[sizeof(slot_type)];
     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
 
-    PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+    construct(slot, std::forward<Args>(args)...);
     const auto& elem = PolicyTraits::element(slot);
     return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
   }
@@ -2179,8 +2538,8 @@
   // This overload is necessary because otherwise erase(const K&) would be
   // a better match if non-const iterator is passed as an argument.
   void erase(iterator it) {
-    AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
-    PolicyTraits::destroy(&alloc_ref(), it.slot_);
+    AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
+    destroy(it.slot());
     erase_meta_only(it);
   }
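The move constructor above copies `hash_ref()`, `eq_ref()`, and the allocator rather than moving them because `that` must stay usable: a moved-from `std::function` may be left empty, and an empty hasher would throw `std::bad_function_call` on the next lookup. The hazard in isolation:

#include <cassert>
#include <functional>

int main() {
  std::function<int(int)> f = [](int x) { return x + 1; };
  std::function<int(int)> g = std::move(f);
  assert(g(1) == 2);
  // f is valid but unspecified and may now be empty; calling it could throw.
  // Copying instead of moving sidesteps this for the container's functors.
  assert(!f || f(1) == 2);
}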
@@ -2211,8 +2570,8 @@
     assert(this != &src);
     for (auto it = src.begin(), e = src.end(); it != e;) {
       auto next = std::next(it);
-      if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
-                              PolicyTraits::element(it.slot_))
+      if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot())},
+                              PolicyTraits::element(it.slot()))
               .second) {
         src.erase_meta_only(it);
       }
@@ -2226,10 +2585,9 @@
   }
 
   node_type extract(const_iterator position) {
-    AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
+    AssertIsFull(position.control(), position.inner_.generation(),
                  position.inner_.generation_ptr(), "extract()");
-    auto node =
-        CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
+    auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
     erase_meta_only(position);
     return node;
   }
@@ -2364,7 +2722,11 @@
   template <class K = key_type>
   bool contains(const key_arg<K>& key) const {
-    return find(key) != end();
+    // Here neither the iterator returned by `find()` nor `end()` can be
+    // invalid outside of potential thread-safety issues.
+    // `find()`'s return value is constructed, used, and then destructed
+    // all in this context.
+    return !find(key).unchecked_equals(end());
   }
 
@@ -2400,8 +2762,10 @@
     const raw_hash_set* outer = &a;
     const raw_hash_set* inner = &b;
     if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
-    for (const value_type& elem : *outer)
-      if (!inner->has_element(elem)) return false;
+    for (const value_type& elem : *outer) {
+      auto it = PolicyTraits::apply(FindElement{*inner}, elem);
+      if (it == inner->end() || !(*it == elem)) return false;
+    }
     return true;
   }
 
@@ -2471,10 +2835,9 @@
     std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
       auto res = s.find_or_prepare_insert(key);
       if (res.second) {
-        PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first,
-                               &slot);
+        s.transfer(s.slot_array() + res.first, &slot);
       } else if (do_destroy) {
-        PolicyTraits::destroy(&s.alloc_ref(), &slot);
+        s.destroy(&slot);
      }
       return {s.iterator_at(res.first), res.second};
     }
@@ -2483,58 +2846,111 @@
     slot_type&& slot;
   };
 
+  // TODO(b/303305702): re-enable reentrant validation.
+  template <typename... Args>
+  inline void construct(slot_type* slot, Args&&... args) {
+    PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+  }
+  inline void destroy(slot_type* slot) {
+    PolicyTraits::destroy(&alloc_ref(), slot);
+  }
+  inline void transfer(slot_type* to, slot_type* from) {
+    PolicyTraits::transfer(&alloc_ref(), to, from);
+  }
+
+  inline void destroy_slots() {
+    const size_t cap = capacity();
+    const ctrl_t* ctrl = control();
+    slot_type* slot = slot_array();
+    for (size_t i = 0; i != cap; ++i) {
+      if (IsFull(ctrl[i])) {
+        destroy(slot + i);
+      }
+    }
+  }
+
+  inline void dealloc() {
+    assert(capacity() != 0);
+    // Unpoison before returning the memory to the allocator.
+    SanitizerUnpoisonMemoryRegion(slot_array(),
+                                  sizeof(slot_type) * capacity());
+    infoz().Unregister();
+    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+        &alloc_ref(), common().backing_array_start(),
+        common().alloc_size(sizeof(slot_type), alignof(slot_type)));
+  }
+
+  inline void destructor_impl() {
+    if (capacity() == 0) return;
+    destroy_slots();
+    dealloc();
+  }
+
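The `merge` loop above relocates only the elements whose keys are absent from the destination; `InsertSlot<false>` leaves duplicates untouched in `src`, matching the `std::` unordered container contract. Observable behavior, assuming `absl::flat_hash_set`:

#include <cassert>
#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<int> dst = {1, 2};
  absl::flat_hash_set<int> src = {2, 3};
  dst.merge(src);
  assert(dst.size() == 3);                     // 1, 2, 3
  assert(src.size() == 1 && src.contains(2));  // the duplicate stays behind
}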
   // Erases, but does not destroy, the value pointed to by `it`.
   //
   // This merely updates the pertinent control byte. This can be used in
   // conjunction with Policy::transfer to move the object to another place.
   void erase_meta_only(const_iterator it) {
-    EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type));
+    EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
+                  sizeof(slot_type));
   }
 
-  // Allocates a backing array for `self` and initializes its control bytes.
-  // This reads `capacity` and updates all other fields based on the result of
-  // the allocation.
-  //
-  // This does not free the currently held array; `capacity` must be nonzero.
-  inline void initialize_slots() {
-    // People are often sloppy with the exact type of their allocator
-    // (sometimes it has an extra const or is missing the pair, but rebinds
-    // made it work anyway).
-    using CharAlloc =
-        typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
-    InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
-        common(), CharAlloc(alloc_ref()));
-  }
-
+  // Resizes the table to the new capacity and moves all elements to their
+  // new positions accordingly.
+  //
+  // Note that for better performance, instead of
+  //   find_first_non_full(common(), hash),
+  //   HashSetResizeHelper::FindFirstNonFullAfterResize(
+  //       common(), old_capacity, hash)
+  // can be called right after `resize`.
   ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
     assert(IsValidCapacity(new_capacity));
-    auto* old_ctrl = control();
+    HashSetResizeHelper resize_helper(common());
     auto* old_slots = slot_array();
-    const size_t old_capacity = common().capacity();
     common().set_capacity(new_capacity);
-    initialize_slots();
-
-    auto* new_slots = slot_array();
-    size_t total_probe_length = 0;
-    for (size_t i = 0; i != old_capacity; ++i) {
-      if (IsFull(old_ctrl[i])) {
-        size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
-                                          PolicyTraits::element(old_slots + i));
-        auto target = find_first_non_full(common(), hash);
-        size_t new_i = target.offset;
-        total_probe_length += target.probe_length;
-        SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
-        PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i);
-      }
+    // Note that `InitializeSlots` performs a different number of
+    // initialization steps depending on the values of `transfer_uses_memcpy`
+    // and the capacities. Refer to the comment in `InitializeSlots` for more
+    // details.
+    const bool grow_single_group =
+        resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
+                                      PolicyTraits::transfer_uses_memcpy(),
+                                      alignof(slot_type)>(
+            common(), const_cast<slot_type*>(old_slots),
+            CharAlloc(alloc_ref()));
+
+    if (resize_helper.old_capacity() == 0) {
+      // InitializeSlots did all the work including infoz().RecordRehash().
+      return;
     }
-    if (old_capacity) {
-      SanitizerUnpoisonMemoryRegion(old_slots,
-                                    sizeof(slot_type) * old_capacity);
-      Deallocate<BackingArrayAlignment(alignof(slot_type))>(
-          &alloc_ref(), old_ctrl - ControlOffset(),
-          AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
+
+    if (grow_single_group) {
+      if (PolicyTraits::transfer_uses_memcpy()) {
+        // InitializeSlots did all the work.
+        return;
+      }
+      // We want GrowSizeIntoSingleGroup to be called here in order to make
+      // InitializeSlots not depend on PolicyTraits.
+      resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(), alloc_ref(),
+                                                          old_slots);
+    } else {
+      // InitializeSlots prepares control bytes to correspond to an empty
+      // table.
+      auto* new_slots = slot_array();
+      size_t total_probe_length = 0;
+      for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
+        if (IsFull(resize_helper.old_ctrl()[i])) {
+          size_t hash = PolicyTraits::apply(
+              HashElement{hash_ref()}, PolicyTraits::element(old_slots + i));
+          auto target = find_first_non_full(common(), hash);
+          size_t new_i = target.offset;
+          total_probe_length += target.probe_length;
+          SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
+          transfer(new_slots + new_i, old_slots + i);
+        }
+      }
+      infoz().RecordRehash(total_probe_length);
     }
-    infoz().RecordRehash(total_probe_length);
+    resize_helper.DeallocateOld<alignof(slot_type)>(
+        CharAlloc(alloc_ref()), sizeof(slot_type),
+        const_cast<slot_type*>(old_slots));
   }
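The note above `resize` is what `prepare_insert` (further below) relies on; the intended calling pattern, sketched in the code's own internal terms with a hypothetical growth step:

// Hypothetical caller that has just grown the table:
//
//   size_t old_capacity = capacity();
//   resize(NextCapacity(old_capacity));  // any growth to a valid capacity
//   FindInfo target = HashSetResizeHelper::FindFirstNonFullAfterResize(
//       common(), old_capacity, hash);
//
// When the table grew into a single group, the helper answers in O(1) from
// the shuffled control bytes; for big tables it falls back to
// find_first_non_full, so the pattern is safe whenever it directly follows
// a resize.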
 
   // Prunes control bytes to remove as many tombstones as possible.
@@ -2604,36 +3020,64 @@
     }
   }
 
-  bool has_element(const value_type& elem) const {
-    size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
-    auto seq = probe(common(), hash);
-    const ctrl_t* ctrl = control();
-    while (true) {
-      Group g{ctrl + seq.offset()};
-      for (uint32_t i : g.Match(H2(hash))) {
-        if (ABSL_PREDICT_TRUE(
-                PolicyTraits::element(slot_array() + seq.offset(i)) == elem))
-          return true;
-      }
-      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
-      seq.next();
-      assert(seq.index() <= capacity() && "full table!");
+  void maybe_increment_generation_or_rehash_on_move() {
+    common().maybe_increment_generation_on_move();
+    if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
+      resize(capacity());
     }
-    return false;
   }
 
-  // TODO(alkis): Optimize this assuming *this and that don't overlap.
-  raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
-    raw_hash_set tmp(std::move(that));
-    swap(tmp);
+  template <bool propagate_alloc>
+  raw_hash_set& assign_impl(raw_hash_set&& that) {
+    // We don't bother checking for this/that aliasing. We just need to avoid
+    // breaking the invariants in that case.
+    destructor_impl();
+    common() = std::move(that.common());
+    // TODO(b/296061262): move instead of copying hash/eq/alloc.
+    hash_ref() = that.hash_ref();
+    eq_ref() = that.eq_ref();
+    CopyAlloc(alloc_ref(), that.alloc_ref(),
+              std::integral_constant<bool, propagate_alloc>());
+    that.common() = CommonFields{};
+    maybe_increment_generation_or_rehash_on_move();
     return *this;
   }
-  raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
-    raw_hash_set tmp(std::move(that), alloc_ref());
-    swap(tmp);
+
+  raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
+    const size_t size = that.size();
+    if (size == 0) return *this;
+    reserve(size);
+    for (iterator it = that.begin(); it != that.end(); ++it) {
+      insert(std::move(PolicyTraits::element(it.slot())));
+      that.destroy(it.slot());
+    }
+    that.dealloc();
+    that.common() = CommonFields{};
+    maybe_increment_generation_or_rehash_on_move();
     return *this;
   }
+
+  raw_hash_set& move_assign(raw_hash_set&& that,
+                            std::true_type /*propagate_alloc*/) {
+    return assign_impl<true>(std::move(that));
+  }
+  raw_hash_set& move_assign(raw_hash_set&& that,
+                            std::false_type /*propagate_alloc*/) {
+    if (alloc_ref() == that.alloc_ref()) {
+      return assign_impl<false>(std::move(that));
+    }
+    // Aliasing can't happen here because allocs would compare equal above.
+    assert(this != &that);
+    destructor_impl();
+    // We can't take over `that`'s memory, so we need to move each element.
+    // While moving elements, `this` should have `that`'s hash/eq, so copy
+    // hash/eq before moving the elements.
+    // TODO(b/296061262): move instead of copying hash/eq.
+    hash_ref() = that.hash_ref();
+    eq_ref() = that.eq_ref();
+    return move_elements_allocs_unequal(std::move(that));
+  }
+
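Which of the three paths above runs is decided by `propagate_on_container_move_assignment` and allocator equality: propagating allocators steal the backing array via `assign_impl<true>`, equal allocators via `assign_impl<false>`, and unequal non-propagating allocators fall back to the element-wise `move_elements_allocs_unequal`. For the default allocator the O(1) path is guaranteed at compile time:

#include <memory>

static_assert(std::allocator_traits<std::allocator<int>>::
                  propagate_on_container_move_assignment::value,
              "std::allocator propagates on move assignment, so tables using "
              "it always take the backing-array-stealing path");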
  protected:
   // Attempts to find `key` in the table; if it isn't found, returns a slot that
   // the value can be inserted into, with the control byte already set to
@@ -2675,10 +3119,19 @@
     if (!rehash_for_bug_detection &&
         ABSL_PREDICT_FALSE(growth_left() == 0 &&
                            !IsDeleted(control()[target.offset]))) {
+      size_t old_capacity = capacity();
       rehash_and_grow_if_necessary();
-      target = find_first_non_full(common(), hash);
+      // NOTE: It is safe to use `FindFirstNonFullAfterResize` here.
+      // `FindFirstNonFullAfterResize` must be called right after resize.
+      // `rehash_and_grow_if_necessary` may *not* call `resize`
+      // and perform `drop_deletes_without_resize` instead. But this
+      // could happen only on big tables.
+      // For big tables `FindFirstNonFullAfterResize` will always
+      // fall back to the normal `find_first_non_full`, so it is safe to use.
+      target = HashSetResizeHelper::FindFirstNonFullAfterResize(
+          common(), old_capacity, hash);
     }
-    common().set_size(common().size() + 1);
+    common().increment_size();
     set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
     SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
     common().maybe_increment_generation_on_insert();
@@ -2696,8 +3149,7 @@
   // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
   template <class... Args>
   void emplace_at(size_t i, Args&&... args) {
-    PolicyTraits::construct(&alloc_ref(), slot_array() + i,
-                            std::forward<Args>(args)...);
+    construct(slot_array() + i, std::forward<Args>(args)...);
 
     assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
                iterator_at(i) &&
@@ -2711,6 +3163,8 @@
     return {control() + i, slot_array() + i, common().generation_ptr()};
   }
 
+  reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
+
  private:
   friend struct RawHashSetTestOnlyAccess;
 
@@ -2743,7 +3197,7 @@
   slot_type* slot_array() const {
     return static_cast<slot_type*>(common().slot_array());
   }
-  HashtablezInfoHandle& infoz() { return common().infoz(); }
+  HashtablezInfoHandle infoz() { return common().infoz(); }
 
   hasher& hash_ref() { return settings_.template get<1>(); }
   const hasher& hash_ref() const { return settings_.template get<1>(); }
@@ -2763,8 +3217,7 @@
   }
   static void transfer_slot_fn(void* set, void* dst, void* src) {
     auto* h = static_cast<raw_hash_set*>(set);
-    PolicyTraits::transfer(&h->alloc_ref(), static_cast<slot_type*>(dst),
-                           static_cast<slot_type*>(src));
+    h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src));
   }
 
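`prepare_insert` above rehashes only when `growth_left()` hits zero and the chosen slot is not a tombstone; `growth_left` derives from the table's 7/8 maximum load factor. A sketch of that arithmetic (mirroring `CapacityToGrowth` for capacities of at least one full group; very small capacities are special-cased in the real code):

#include <cstddef>

constexpr std::size_t CapacityToGrowthDemo(std::size_t capacity) {
  return capacity - capacity / 8;  // grow once size reaches 7/8 of capacity
}

static_assert(CapacityToGrowthDemo(15) == 14, "one of 15 slots stays free");
static_assert(CapacityToGrowthDemo(127) == 112, "7/8 of the next power of 2");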
   // Note: dealloc_fn will only be used if we have a non-standard allocator.
   static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
@@ -2774,6 +3227,7 @@ class raw_hash_set {
     SanitizerUnpoisonMemoryRegion(common.slot_array(),
                                   sizeof(slot_type) * common.capacity());
 
+    common.infoz().Unregister();
     Deallocate<BackingArrayAlignment(alignof(slot_type))>(
         &set->alloc_ref(), common.backing_array_start(),
         common.alloc_size(sizeof(slot_type), alignof(slot_type)));
@@ -2847,33 +3301,18 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
   static size_t AllocatedByteSize(const Set& c) {
     size_t capacity = c.capacity();
     if (capacity == 0) return 0;
-    size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
+    size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot));
 
     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
     if (per_slot != ~size_t{}) {
       m += per_slot * c.size();
     } else {
-      const ctrl_t* ctrl = c.control();
-      for (size_t i = 0; i != capacity; ++i) {
-        if (container_internal::IsFull(ctrl[i])) {
-          m += Traits::space_used(c.slot_array() + i);
-        }
+      for (auto it = c.begin(); it != c.end(); ++it) {
+        m += Traits::space_used(it.slot());
       }
     }
     return m;
   }
-
-  static size_t LowerBoundAllocatedByteSize(size_t size) {
-    size_t capacity = GrowthToLowerboundCapacity(size);
-    if (capacity == 0) return 0;
-    size_t m =
-        AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
-    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
-    if (per_slot != ~size_t{}) {
-      m += per_slot * size;
-    }
-    return m;
-  }
 };
 
 }  // namespace hashtable_debug_internal
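`Traits::space_used(nullptr)` returning `~size_t{}` is the sentinel for "no constant per-slot answer", which is what forces the per-element walk in `AllocatedByteSize` above. The two policy shapes, sketched with hypothetical traits:

#include <cstddef>
#include <string>

// Flat-style policy: everything lives in the slot, so the per-slot overhead
// is a compile-time constant (here: zero) and the helper can just multiply.
struct FlatTraitsDemo {
  static std::size_t space_used(const int*) { return 0; }
};

// Indirect policy: each slot owns heap memory of varying size, so there is
// no constant answer and the helper must visit every live slot.
struct StringTraitsDemo {
  static std::size_t space_used(const std::string* slot) {
    return slot == nullptr ? ~std::size_t{} : slot->capacity();
  }
};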
diff --git a/absl/container/internal/test_allocator.h b/absl/container/internal/test_allocator.h
new file mode 100644
index 0000000..8e365a3
--- /dev/null
+++ b/absl/container/internal/test_allocator.h
@@ -0,0 +1,387 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TEST_ALLOCATOR_H_
+#define ABSL_CONTAINER_INTERNAL_TEST_ALLOCATOR_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <class T>
+class CountingAllocator {
+ public:
+  using Allocator = std::allocator<T>;
+  using AllocatorTraits = std::allocator_traits<Allocator>;
+  using value_type = typename AllocatorTraits::value_type;
+  using pointer = typename AllocatorTraits::pointer;
+  using const_pointer = typename AllocatorTraits::const_pointer;
+  using size_type = typename AllocatorTraits::size_type;
+  using difference_type = typename AllocatorTraits::difference_type;
+
+  CountingAllocator() = default;
+  explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {}
+  CountingAllocator(int64_t* bytes_used, int64_t* instance_count)
+      : bytes_used_(bytes_used), instance_count_(instance_count) {}
+
+  template <class U>
+  CountingAllocator(const CountingAllocator<U>& x)
+      : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {}
+
+  pointer allocate(
+      size_type n,
+      typename AllocatorTraits::const_void_pointer hint = nullptr) {
+    Allocator allocator;
+    pointer ptr = AllocatorTraits::allocate(allocator, n, hint);
+    if (bytes_used_ != nullptr) {
+      *bytes_used_ += n * sizeof(T);
+    }
+    return ptr;
+  }
+
+  void deallocate(pointer p, size_type n) {
+    Allocator allocator;
+    AllocatorTraits::deallocate(allocator, p, n);
+    if (bytes_used_ != nullptr) {
+      *bytes_used_ -= n * sizeof(T);
+    }
+  }
+
+  template <class U, class... Args>
+  void construct(U* p, Args&&... args) {
+    Allocator allocator;
+    AllocatorTraits::construct(allocator, p, std::forward<Args>(args)...);
+    if (instance_count_ != nullptr) {
+      *instance_count_ += 1;
+    }
+  }
+
+  template <class U>
+  void destroy(U* p) {
+    Allocator allocator;
+    // Ignore GCC warning bug.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuse-after-free"
+#endif
+    AllocatorTraits::destroy(allocator, p);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
+    if (instance_count_ != nullptr) {
+      *instance_count_ -= 1;
+    }
+  }
+
+  template <class U>
+  class rebind {
+   public:
+    using other = CountingAllocator<U>;
+  };
+
+  friend bool operator==(const CountingAllocator& a,
+                         const CountingAllocator& b) {
+    return a.bytes_used_ == b.bytes_used_ &&
+           a.instance_count_ == b.instance_count_;
+  }
+
+  friend bool operator!=(const CountingAllocator& a,
+                         const CountingAllocator& b) {
+    return !(a == b);
+  }
+
+  int64_t* bytes_used_ = nullptr;
+  int64_t* instance_count_ = nullptr;
+};
+
+template <typename T>
+struct CopyAssignPropagatingCountingAlloc : public CountingAllocator<T> {
+  using propagate_on_container_copy_assignment = std::true_type;
+
+  using Base = CountingAllocator<T>;
+  using Base::Base;
+
+  template <typename U>
+  explicit CopyAssignPropagatingCountingAlloc(
+      const CopyAssignPropagatingCountingAlloc<U>& other)
+      : Base(other.bytes_used_, other.instance_count_) {}
+
+  template <typename U>
+  struct rebind {
+    using other = CopyAssignPropagatingCountingAlloc<U>;
+  };
+};
+
+template <typename T>
+struct MoveAssignPropagatingCountingAlloc : public CountingAllocator<T> {
+  using propagate_on_container_move_assignment = std::true_type;
+
+  using Base = CountingAllocator<T>;
+  using Base::Base;
+
+  template <typename U>
+  explicit MoveAssignPropagatingCountingAlloc(
+      const MoveAssignPropagatingCountingAlloc<U>& other)
+      : Base(other.bytes_used_, other.instance_count_) {}
+
+  template <typename U>
+  struct rebind {
+    using other = MoveAssignPropagatingCountingAlloc<U>;
+  };
+};
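A possible use of `CountingAllocator` in a test, shown with a standard container for brevity (the hash table tests thread it through the allocator template parameter in the same way):

#include <cassert>
#include <cstdint>
#include <vector>

void CountingAllocatorDemo() {
  int64_t bytes = 0;
  CountingAllocator<int> alloc(&bytes);
  std::vector<int, CountingAllocator<int>> v(alloc);
  v.push_back(1);
  // The allocator counts n * sizeof(T) per allocation, so `bytes` mirrors
  // the vector's live capacity and drops back to 0 when `v` is destroyed.
  assert(bytes == static_cast<int64_t>(sizeof(int) * v.capacity()));
}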
+
+template <typename T>
+struct SwapPropagatingCountingAlloc : public CountingAllocator<T> {
+  using propagate_on_container_swap = std::true_type;
+
+  using Base = CountingAllocator<T>;
+  using Base::Base;
+
+  template <typename U>
+  explicit SwapPropagatingCountingAlloc(
+      const SwapPropagatingCountingAlloc<U>& other)
+      : Base(other.bytes_used_, other.instance_count_) {}
+
+  template <typename U>
+  struct rebind {
+    using other = SwapPropagatingCountingAlloc<U>;
+  };
+};
+
+// Tries to allocate memory at the minimum alignment even when the default
+// allocator uses a higher alignment.
+template <typename T>
+struct MinimumAlignmentAlloc : std::allocator<T> {
+  MinimumAlignmentAlloc() = default;
+
+  template <typename U>
+  explicit MinimumAlignmentAlloc(const MinimumAlignmentAlloc<U>& /*other*/) {}
+
+  template <class U>
+  struct rebind {
+    using other = MinimumAlignmentAlloc<U>;
+  };
+
+  T* allocate(size_t n) {
+    // Over-allocate by one element and return a pointer bumped by alignof(T)
+    // so the result keeps T's minimum alignment but loses any stronger
+    // alignment the underlying allocator happened to provide.
+    T* ptr = std::allocator<T>::allocate(n + 1);
+    char* cptr = reinterpret_cast<char*>(ptr);
+    cptr += alignof(T);
+    return reinterpret_cast<T*>(cptr);
+  }
+
+  void deallocate(T* ptr, size_t n) {
+    // Undo the offset applied in allocate() before freeing.
+    char* cptr = reinterpret_cast<char*>(ptr);
+    cptr -= alignof(T);
+    std::allocator<T>::deallocate(reinterpret_cast<T*>(cptr), n + 1);
+  }
+};
+
+inline bool IsAssertEnabled() {
+  // Use an assert with side effects to figure out if they are actually
+  // enabled.
+  bool assert_enabled = false;
+  assert([&]() {  // NOLINT
+    assert_enabled = true;
+    return true;
+  }());
+  return assert_enabled;
+}
+
+template