From be40cf55707b2f7fb0b3189c2a1edbe7dfedbbe9 Mon Sep 17 00:00:00 2001
From: Nick Knize
Date: Mon, 17 Jul 2023 22:52:28 -0500
Subject: [PATCH] [Refactor] Remaining HPPC to java.util collections (#8730)

This commit refactors the remaining usages of hppc collections to
java.util collections and completely removes the obsolete hppc dependency.

Signed-off-by: Nicholas Walter Knize
---
 CHANGELOG.md | 1 +
 .../org/opensearch/common/util/BitMixer.java | 172 ++++
 libs/core/.classpath1 | 6 -
 ...AbstractGeoBucketAggregationIntegTest.java | 16 +-
 .../aggregations/bucket/GeoHashGridIT.java | 11 +-
 ...ractGeoAggregatorModulePluginTestCase.java | 14 +-
 server/build.gradle | 1 -
 server/licenses/hppc-0.8.1.jar.sha1 | 1 -
 server/licenses/hppc-LICENSE.txt | 203 ---------
 server/licenses/hppc-NOTICE.txt | 11 -
 .../shards/IndicesShardStoreRequestIT.java | 33 +-
 .../cluster/ClusterStateDiffIT.java | 7 +-
 .../allocation/AwarenessAllocationIT.java | 31 +-
 .../cluster/routing/PrimaryAllocationIT.java | 11 +-
 .../index/store/CorruptedFileIT.java | 7 +-
 .../search/aggregations/CombiIT.java | 6 +-
 .../search/aggregations/EquivalenceIT.java | 5 +-
 .../aggregations/bucket/HistogramIT.java | 15 +-
 .../aggregations/bucket/MinDocCountIT.java | 4 +-
 .../search/scroll/DuelScrollIT.java | 5 +-
 .../DedicatedClusterSnapshotRestoreIT.java | 7 +-
 .../TransportClusterRerouteAction.java | 5 +-
 .../cluster/stats/ClusterStatsIndices.java | 10 +-
 .../cluster/stats/ClusterStatsNodes.java | 54 +--
 .../mapping/put/PutMappingRequest.java | 4 +-
 .../shards/IndicesShardStoresResponse.java | 43 +-
 .../TransportIndicesShardStoresAction.java | 14 +-
 .../action/search/SearchPhaseController.java | 49 +--
 .../action/support/ActiveShardCount.java | 6 +-
 .../TransportBroadcastReplicationAction.java | 8 +-
 .../action/termvectors/TermVectorsFields.java | 23 +-
 .../org/opensearch/cluster/ClusterInfo.java | 25 +-
 .../cluster/metadata/IndexMetadata.java | 6 +-
 .../metadata/MetadataIndexStateService.java | 18 +-
 .../cluster/routing/IndexRoutingTable.java | 42 +-
 .../cluster/routing/RoutingNodes.java | 28 +-
 .../cluster/routing/RoutingTable.java | 8 +-
 .../routing/WeightedRoutingService.java | 8 +-
 .../allocator/RemoteShardsBalancer.java | 14 +-
 .../decider/AwarenessAllocationDecider.java | 21 +-
 .../opensearch/common/FieldMemoryStats.java | 42 +-
 .../opensearch/common/collect/HppcMaps.java | 175 --------
 .../common/collect/ImmutableOpenIntMap.java | 410 ------------------
 .../lucene/search/MultiPhrasePrefixQuery.java | 9 +-
 .../search/function/RandomScoreFunction.java | 2 +-
 .../opensearch/common/recycler/Recyclers.java | 2 +-
 .../common/util/AbstractPagedHashMap.java | 1 -
 .../opensearch/common/util/BytesRefHash.java | 1 -
 .../opensearch/common/util/LongLongHash.java | 2 -
 .../common/util/ReorganizingLongHash.java | 1 -
 .../java/org/opensearch/gateway/Gateway.java | 9 +-
 .../http/AbstractHttpServerTransport.java | 7 +-
 .../index/engine/CombinedDeletionPolicy.java | 10 +-
 .../index/engine/CompletionStatsCache.java | 16 +-
 .../index/fielddata/ShardFieldData.java | 6 +-
 .../opensearch/index/mapper/ParseContext.java | 8 +-
 .../index/seqno/LocalCheckpointTracker.java | 25 +-
 .../index/seqno/ReplicationTracker.java | 6 +-
 .../opensearch/index/shard/IndexShard.java | 7 +-
 .../index/translog/MultiSnapshot.java | 5 +-
 .../index/translog/TranslogWriter.java | 12 +-
 .../indices/IndicesRequestCache.java | 7 +-
 .../rest/action/cat/RestAllocationAction.java | 7 +-
 .../rest/action/cat/RestFielddataAction.java | 7 +-
 .../opensearch/script/ScoreScriptUtils.java | 2 +-
 .../bucket/nested/NestedAggregator.java | 11 +-
 .../nested/ReverseNestedAggregator.java | 13 +-
 .../bucket/terms/IncludeExclude.java | 12 +-
 .../metrics/CardinalityAggregator.java | 2 +-
 .../metrics/TopHitsAggregator.java | 18 +-
 ...tilesBucketPipelineAggregationBuilder.java | 9 +-
 .../opensearch/search/dfs/AggregatedDfs.java | 29 +-
 .../org/opensearch/search/dfs/DfsPhase.java | 4 +-
 .../search/dfs/DfsSearchResult.java | 31 +-
 .../search/slice/DocValuesSliceQuery.java | 2 +-
 .../opensearch/snapshots/RestoreService.java | 6 +-
 .../org/opensearch/tasks/TaskManager.java | 15 +-
 .../opensearch/transport/TcpTransport.java | 6 +-
 .../IndicesShardStoreResponseTests.java | 7 +-
 .../action/support/ActiveShardCountTests.java | 17 +-
 .../health/ClusterStateHealthTests.java | 61 ++-
 .../cluster/routing/UnassignedInfoTests.java | 4 +-
 .../RemoteShardsAllocateUnassignedTests.java | 8 +-
 .../RemoteShardsRebalanceShardsTests.java | 18 +-
 .../allocation/ThrottlingAllocationTests.java | 3 +-
 .../decider/DiskThresholdDeciderTests.java | 3 +-
 ...storeInProgressAllocationDeciderTests.java | 5 +-
 .../common/FieldMemoryStatsTests.java | 43 +-
 .../opensearch/common/hppc/HppcMapsTests.java | 111 -----
 .../common/util/BytesRefHashTests.java | 9 +-
 .../opensearch/common/util/LongHashTests.java | 12 +-
 .../common/util/LongObjectHashMapTests.java | 10 +-
 .../engine/CombinedDeletionPolicyTests.java | 3 +-
 .../seqno/LocalCheckpointTrackerTests.java | 6 +-
 .../index/translog/MultiSnapshotTests.java | 10 +-
 .../index/translog/SnapshotMatchers.java | 6 +-
 .../range/BinaryRangeAggregatorTests.java | 14 +-
 .../metrics/AbstractGeoTestCase.java | 15 +-
 .../HyperLogLogPlusPlusSparseTests.java | 2 +-
 .../metrics/HyperLogLogPlusPlusTests.java | 7 +-
 .../metrics/InternalCardinalityTests.java | 2 +-
 .../slice/DocValuesSliceQueryTests.java | 2 +-
 .../InternalSnapshotsInfoServiceTests.java | 4 +-
 .../coordination/LinearizabilityChecker.java | 19 +-
 .../opensearch/test/InternalTestCluster.java | 17 +-
 105 files changed, 731 insertions(+), 1577 deletions(-)
 create mode 100644 libs/common/src/main/java/org/opensearch/common/util/BitMixer.java
 delete mode 100644 server/licenses/hppc-0.8.1.jar.sha1
 delete mode 100644 server/licenses/hppc-LICENSE.txt
 delete mode 100644 server/licenses/hppc-NOTICE.txt
 delete mode 100644 server/src/main/java/org/opensearch/common/collect/HppcMaps.java
 delete mode 100644 server/src/main/java/org/opensearch/common/collect/ImmutableOpenIntMap.java
 delete mode 100644 server/src/test/java/org/opensearch/common/hppc/HppcMapsTests.java

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 57f61548c768b..1a7fae0187faa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Make Span exporter configurable ([#8620](https://github.com/opensearch-project/OpenSearch/issues/8620))
 - Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303))
 - [Refactor] StreamIO from common to core.common namespace in core lib ([#8157](https://github.com/opensearch-project/OpenSearch/pull/8157))
+- [Refactor] Remaining HPPC to java.util collections ([#8730](https://github.com/opensearch-project/OpenSearch/pull/8730))
 
 ### Deprecated
 
diff --git a/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java b/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java
new file mode 100644
index 0000000000000..8762217916c7a
--- /dev/null
+++ b/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java
@@ -0,0 +1,172 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * HPPC
+ *
+ * Copyright (C) 2010-2022 Carrot Search s.c.
+ * All rights reserved.
+ *
+ * Refer to the full license file "LICENSE.txt":
+ * https://github.com/carrotsearch/hppc/blob/master/LICENSE.txt
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.common.util;
+
+/**
+ * Bit mixing utilities from carrotsearch.hppc.
+ *
+ * Licensed under ALv2. This is pulled in directly to avoid a full hppc dependency.
+ *
+ * The purpose of these methods is to evenly distribute key space over int32
+ * range.
+ */
+public final class BitMixer {
+
+    // Don't bother mixing very small key domains much.
+    public static int mix(byte key) {
+        return key * PHI_C32;
+    }
+
+    public static int mix(byte key, int seed) {
+        return (key ^ seed) * PHI_C32;
+    }
+
+    public static int mix(short key) {
+        return mixPhi(key);
+    }
+
+    public static int mix(short key, int seed) {
+        return mixPhi(key ^ seed);
+    }
+
+    public static int mix(char key) {
+        return mixPhi(key);
+    }
+
+    public static int mix(char key, int seed) {
+        return mixPhi(key ^ seed);
+    }
+
+    // Better mix for larger key domains.
+    public static int mix(int key) {
+        return mix32(key);
+    }
+
+    public static int mix(int key, int seed) {
+        return mix32(key ^ seed);
+    }
+
+    public static int mix(float key) {
+        return mix32(Float.floatToIntBits(key));
+    }
+
+    public static int mix(float key, int seed) {
+        return mix32(Float.floatToIntBits(key) ^ seed);
+    }
+
+    public static int mix(double key) {
+        return (int) mix64(Double.doubleToLongBits(key));
+    }
+
+    public static int mix(double key, int seed) {
+        return (int) mix64(Double.doubleToLongBits(key) ^ seed);
+    }
+
+    public static int mix(long key) {
+        return (int) mix64(key);
+    }
+
+    public static int mix(long key, int seed) {
+        return (int) mix64(key ^ seed);
+    }
+
+    public static int mix(Object key) {
+        return key == null ? 0 : mix32(key.hashCode());
+    }
+
+    public static int mix(Object key, int seed) {
+        return key == null ? 0 : mix32(key.hashCode() ^ seed);
+    }
+
+    /**
+     * MH3's plain finalization step.
+     */
+    public static int mix32(int k) {
+        k = (k ^ (k >>> 16)) * 0x85ebca6b;
+        k = (k ^ (k >>> 13)) * 0xc2b2ae35;
+        return k ^ (k >>> 16);
+    }
+
+    /**
+     * Computes David Stafford variant 9 of 64bit mix function (MH3 finalization step,
+     * with different shifts and constants).
+     *
+     * Variant 9 is picked because it contains two 32-bit shifts which could be possibly
+     * optimized into better machine code.
+     *
+     * @see "http://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html"
+     */
+    public static long mix64(long z) {
+        z = (z ^ (z >>> 32)) * 0x4cd6944c5cc20b6dL;
+        z = (z ^ (z >>> 29)) * 0xfc12c5b19d3259e9L;
+        return z ^ (z >>> 32);
+    }
+
+    /*
+     * Golden ratio bit mixers.
+     */
+
+    private static final int PHI_C32 = 0x9e3779b9;
+    private static final long PHI_C64 = 0x9e3779b97f4a7c15L;
+
+    public static int mixPhi(byte k) {
+        final int h = k * PHI_C32;
+        return h ^ (h >>> 16);
+    }
+
+    public static int mixPhi(char k) {
+        final int h = k * PHI_C32;
+        return h ^ (h >>> 16);
+    }
+
+    public static int mixPhi(short k) {
+        final int h = k * PHI_C32;
+        return h ^ (h >>> 16);
+    }
+
+    public static int mixPhi(int k) {
+        final int h = k * PHI_C32;
+        return h ^ (h >>> 16);
+    }
+
+    public static int mixPhi(float k) {
+        final int h = Float.floatToIntBits(k) * PHI_C32;
+        return h ^ (h >>> 16);
+    }
+
+    public static int mixPhi(double k) {
+        final long h = Double.doubleToLongBits(k) * PHI_C64;
+        return (int) (h ^ (h >>> 32));
+    }
+
+    public static int mixPhi(long k) {
+        final long h = k * PHI_C64;
+        return (int) (h ^ (h >>> 32));
+    }
+
+    public static int mixPhi(Object k) {
+        final int h = (k == null ? 0 : k.hashCode() * PHI_C32);
+        return h ^ (h >>> 16);
+    }
+}
diff --git a/libs/core/.classpath1 b/libs/core/.classpath1
index 3a20aa700e9e9..3586b0ee921fb 100644
--- a/libs/core/.classpath1
+++ b/libs/core/.classpath1
@@ -253,12 +253,6 @@
-
-
-
-
-
-
diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java
index 30088c1acb136..d9ff3e8f473ef 100644
--- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java
+++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java
@@ -8,8 +8,6 @@
 
 package org.opensearch.geo.search.aggregations.bucket;
 
-import com.carrotsearch.hppc.ObjectIntHashMap;
-import com.carrotsearch.hppc.ObjectIntMap;
 import org.apache.lucene.geo.GeoEncodingUtils;
 import org.opensearch.Version;
 import org.opensearch.action.index.IndexRequestBuilder;
@@ -26,8 +24,10 @@
 import org.opensearch.test.VersionUtils;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 
@@ -51,11 +51,11 @@ public abstract class AbstractGeoBucketAggregationIntegTest extends GeoModulePlu
 
     protected static Rectangle boundingRectangleForGeoShapesAgg;
 
-    protected static ObjectIntMap<String> expectedDocsCountForGeoShapes;
+    protected static Map<String, Integer> expectedDocsCountForGeoShapes;
 
-    protected static ObjectIntMap<String> expectedDocCountsForSingleGeoPoint;
+    protected static Map<String, Integer> expectedDocCountsForSingleGeoPoint;
 
-    protected static ObjectIntMap<String> multiValuedExpectedDocCountsGeoPoint;
+    protected static Map<String, Integer> multiValuedExpectedDocCountsGeoPoint;
 
     protected static final String GEO_SHAPE_FIELD_NAME = "location_geo_shape";
 
@@ -82,7 +82,7 @@ protected boolean forbidPrivateIndexSettings() {
      * @throws Exception thrown during index creation.
      */
     protected void prepareGeoShapeIndexForAggregations(final Random random) throws Exception {
-        expectedDocsCountForGeoShapes = new ObjectIntHashMap<>();
+        expectedDocsCountForGeoShapes = new HashMap<>();
         final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build();
         final List<IndexRequestBuilder> geoshapes = new ArrayList<>();
         assertAcked(prepareCreate(GEO_SHAPE_INDEX_NAME).setSettings(settings).setMapping(GEO_SHAPE_FIELD_NAME, "type" + "=geo_shape"));
@@ -129,7 +129,7 @@ protected void prepareGeoShapeIndexForAggregations(final Random random) throws E
      * @throws Exception thrown during index creation.
      */
     protected void prepareSingleValueGeoPointIndex(final Random random) throws Exception {
-        expectedDocCountsForSingleGeoPoint = new ObjectIntHashMap<>();
+        expectedDocCountsForSingleGeoPoint = new HashMap<>();
         createIndex("idx_unmapped");
         final Settings settings = Settings.builder()
             .put(IndexMetadata.SETTING_VERSION_CREATED, version)
@@ -155,7 +155,7 @@ protected void prepareSingleValueGeoPointIndex(final Random random) throws Excep
     }
 
     protected void prepareMultiValuedGeoPointIndex(final Random random) throws Exception {
-        multiValuedExpectedDocCountsGeoPoint = new ObjectIntHashMap<>();
+        multiValuedExpectedDocCountsGeoPoint = new HashMap<>();
         final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build();
         final List<IndexRequestBuilder> cities = new ArrayList<>();
         assertAcked(
diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java
index 3d4cd430a77e2..e3ca03aa495ab 100644
--- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java
+++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java
@@ -31,8 +31,6 @@
 
 package org.opensearch.geo.search.aggregations.bucket;
 
-import com.carrotsearch.hppc.ObjectIntHashMap;
-import com.carrotsearch.hppc.cursors.ObjectIntCursor;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.geo.GeoBoundingBox;
 import org.opensearch.common.geo.GeoPoint;
@@ -49,6 +47,7 @@
 import org.opensearch.search.aggregations.bucket.filter.Filter;
 import org.opensearch.test.OpenSearchIntegTestCase;
 
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Random;
@@ -70,7 +69,7 @@ public void setupSuiteScopeCluster() throws Exception {
         Random random = random();
         // Creating a BB for limiting the number buckets generated during aggregation
         boundingRectangleForGeoShapesAgg = getGridAggregationBoundingBox(random);
-        expectedDocCountsForSingleGeoPoint = new ObjectIntHashMap<>();
+        expectedDocCountsForSingleGeoPoint = new HashMap<>();
         prepareSingleValueGeoPointIndex(random);
         prepareMultiValuedGeoPointIndex(random);
         prepareGeoShapeIndexForAggregations(random);
@@ -232,9 +231,9 @@ public void testTopMatch() {
             String geohash = cell.getKeyAsString();
             long bucketCount = cell.getDocCount();
             int expectedBucketCount = 0;
-            for (ObjectIntCursor<String> cursor : expectedDocCountsForSingleGeoPoint) {
-                if (cursor.key.length() == precision) {
-                    expectedBucketCount = Math.max(expectedBucketCount, cursor.value);
+            for (var cursor : expectedDocCountsForSingleGeoPoint.entrySet()) {
+                if (cursor.getKey().length() == precision) {
+                    expectedBucketCount = Math.max(expectedBucketCount, cursor.getValue());
                 }
             }
             assertNotSame(bucketCount, 0);
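The recurring migration pattern in these test changes is mechanical: hppc's primitive-specialized maps become boxed java.util maps, addTo(key, 1) becomes Map.merge(key, 1, Integer::sum), and cursor iteration becomes entrySet() iteration. A minimal self-contained sketch of the before/after idioms (the class name and sample data below are hypothetical, not from the patch):

import java.util.HashMap;
import java.util.Map;

public final class HppcMigrationSketch {
    public static void main(String[] args) {
        // hppc (before): ObjectIntHashMap<String> counts = new ObjectIntHashMap<>(); counts.addTo(nodeName, 1);
        // java.util (after): merge() inserts 1 for a new key, or sums with the existing count.
        final Map<String, Integer> counts = new HashMap<>();
        for (String nodeName : new String[] { "node-1", "node-2", "node-1" }) {
            counts.merge(nodeName, 1, Integer::sum);
        }
        // hppc (before): for (ObjectIntCursor<String> c : counts) { ... c.key ... c.value ... }
        // java.util (after): iterate entries; getKey()/getValue() replace cursor.key/cursor.value.
        for (Map.Entry<String, Integer> e : counts.entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue()); // e.g. node-1 -> 2, node-2 -> 1 (order unspecified)
        }
    }
}

One trade-off worth noting: the java.util forms box their keys and values, which is acceptable in tests and cold paths; the hashing-sensitive hot paths keep primitive-friendly mixing via the inlined BitMixer above.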
diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java
index 03ed2ea6d1e3b..f7c9747e1a163 100644
--- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java
+++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java
@@ -8,10 +8,6 @@
 
 package org.opensearch.geo.search.aggregations.metrics;
 
-import com.carrotsearch.hppc.ObjectIntHashMap;
-import com.carrotsearch.hppc.ObjectIntMap;
-import com.carrotsearch.hppc.ObjectObjectHashMap;
-import com.carrotsearch.hppc.ObjectObjectMap;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.Strings;
@@ -32,7 +28,9 @@
 import org.opensearch.search.sort.SortOrder;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.IntStream;
 
 import static org.hamcrest.Matchers.equalTo;
@@ -65,8 +63,8 @@ public abstract class AbstractGeoAggregatorModulePluginTestCase extends GeoModul
     protected static Geometry[] geoShapesValues;
     protected static GeoPoint singleTopLeft, singleBottomRight, multiTopLeft, multiBottomRight, singleCentroid, multiCentroid,
         unmappedCentroid, geoShapeTopLeft, geoShapeBottomRight;
-    protected static ObjectIntMap<String> expectedDocCountsForGeoHash = null;
-    protected static ObjectObjectMap<String, GeoPoint> expectedCentroidsForGeoHash = null;
+    protected static Map<String, Integer> expectedDocCountsForGeoHash = null;
+    protected static Map<String, GeoPoint> expectedCentroidsForGeoHash = null;
 
     @Override
     public void setupSuiteScopeCluster() throws Exception {
@@ -98,8 +96,8 @@ public void setupSuiteScopeCluster() throws Exception {
         numDocs = randomIntBetween(6, 20);
         numUniqueGeoPoints = randomIntBetween(1, numDocs);
-        expectedDocCountsForGeoHash = new ObjectIntHashMap<>(numDocs * 2);
-        expectedCentroidsForGeoHash = new ObjectObjectHashMap<>(numDocs * 2);
+        expectedDocCountsForGeoHash = new HashMap<>(numDocs * 2);
+        expectedCentroidsForGeoHash = new HashMap<>(numDocs * 2);
 
         singleValues = new GeoPoint[numUniqueGeoPoints];
         for (int i = 0; i < singleValues.length; i++) {
diff --git a/server/build.gradle b/server/build.gradle
index c8f3e420c72ee..c608c5ff86f06 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -128,7 +128,6 @@ dependencies {
 
   // utilities
   api project(":libs:opensearch-cli")
-  api 'com.carrotsearch:hppc:0.8.1'
 
   // time handling, remove with java 8 time
   api "joda-time:joda-time:${versions.joda}"
diff --git a/server/licenses/hppc-0.8.1.jar.sha1 b/server/licenses/hppc-0.8.1.jar.sha1
deleted file mode 100644
index 47684ed023210..0000000000000
--- a/server/licenses/hppc-0.8.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ffc7ba8f289428b9508ab484b8001dea944ae603
\ No newline at end of file
diff --git a/server/licenses/hppc-LICENSE.txt b/server/licenses/hppc-LICENSE.txt
deleted file mode 100644
index 31467575cdbfe..0000000000000
--- a/server/licenses/hppc-LICENSE.txt
+++ /dev/null
@@ -1,203 +0,0 @@
- - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions.
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2010-2013, Carrot Search s.c., Boznicza 11/56, Poznan, Poland - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-
diff --git a/server/licenses/hppc-NOTICE.txt b/server/licenses/hppc-NOTICE.txt
deleted file mode 100644
index 1d8842c0bc69d..0000000000000
--- a/server/licenses/hppc-NOTICE.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-ACKNOWLEDGEMENT
-===============
-
-HPPC borrowed code, ideas or both from:
-
- * Apache Lucene, http://lucene.apache.org/
-   (Apache license)
- * Fastutil, http://fastutil.di.unimi.it/
-   (Apache license)
- * Koloboke, https://github.com/OpenHFT/Koloboke
-   (Apache license)
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
index 52b2ea93fe071..fe5ec9227e844 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
@@ -32,8 +32,6 @@
 
 package org.opensearch.action.admin.indices.shards;
 
-import com.carrotsearch.hppc.cursors.IntObjectCursor;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.lucene.index.CorruptIndexException;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.client.Requests;
@@ -42,7 +40,6 @@
 import org.opensearch.cluster.routing.IndexRoutingTable;
 import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.cluster.routing.ShardRoutingState;
-import org.opensearch.common.collect.ImmutableOpenIntMap;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.index.Index;
 import org.opensearch.index.IndexService;
@@ -101,10 +98,10 @@ public void testBasic() throws Exception {
         // all shards
         response = client().admin().indices().shardStores(Requests.indicesShardStoresRequest(index).shardStatuses("all")).get();
         assertThat(response.getStoreStatuses().containsKey(index), equalTo(true));
-        ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> shardStores = response.getStoreStatuses().get(index);
+        final Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStores = response.getStoreStatuses().get(index);
         assertThat(shardStores.values().size(), equalTo(2));
-        for (ObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : shardStores.values()) {
-            for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.value) {
+        for (var shardStoreStatuses : shardStores.values()) {
+            for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses) {
                 assertThat(storeStatus.getAllocationId(), notNullValue());
                 assertThat(storeStatus.getNode(), notNullValue());
                 assertThat(storeStatus.getStoreException(), nullValue());
@@ -123,13 +120,13 @@ public void testBasic() throws Exception {
         List<ShardRouting> unassignedShards = clusterState.routingTable().index(index).shardsWithState(ShardRoutingState.UNASSIGNED);
         response = client().admin().indices().shardStores(Requests.indicesShardStoresRequest(index)).get();
         assertThat(response.getStoreStatuses().containsKey(index), equalTo(true));
-        ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> shardStoresStatuses = response.getStoreStatuses().get(index);
+        final Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStoresStatuses = response.getStoreStatuses().get(index);
         assertThat(shardStoresStatuses.size(), equalTo(unassignedShards.size()));
-        for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> storesStatus : shardStoresStatuses) {
-            assertThat("must report for one store", storesStatus.value.size(), equalTo(1));
+        for (var storesStatus : shardStoresStatuses.values()) {
+            assertThat("must report for one store", storesStatus.size(), equalTo(1));
             assertThat(
                 "reported store should be primary",
-                storesStatus.value.get(0).getAllocationStatus(),
+                storesStatus.get(0).getAllocationStatus(),
                 equalTo(IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY)
             );
         }
@@ -150,7 +147,7 @@ public void testIndices() throws Exception {
             .indices()
             .shardStores(Requests.indicesShardStoresRequest().shardStatuses("all"))
             .get();
-        Map<String, ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>>> shardStatuses = response.getStoreStatuses();
+        Map<String, Map<Integer, List<IndicesShardStoresResponse.StoreStatus>>> shardStatuses = response.getStoreStatuses();
         assertThat(shardStatuses.containsKey(index1), equalTo(true));
         assertThat(shardStatuses.containsKey(index2), equalTo(true));
         assertThat(shardStatuses.get(index1).size(), equalTo(2));
@@ -205,21 +202,21 @@ public void testCorruptedShards() throws Exception {
         assertBusy(() -> { // IndicesClusterStateService#failAndRemoveShard() called asynchronously but we need it to have completed here.
             IndicesShardStoresResponse rsp = client().admin().indices().prepareShardStores(index).setShardStatuses("all").get();
-            ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> shardStatuses = rsp.getStoreStatuses().get(index);
+            final Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStatuses = rsp.getStoreStatuses().get(index);
             assertNotNull(shardStatuses);
             assertThat(shardStatuses.size(), greaterThan(0));
-            for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStatus : shardStatuses) {
-                for (IndicesShardStoresResponse.StoreStatus status : shardStatus.value) {
-                    if (corruptedShardIDMap.containsKey(shardStatus.key)
-                        && corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().getName())) {
+            for (var shardStatus : shardStatuses.entrySet()) {
+                for (IndicesShardStoresResponse.StoreStatus status : shardStatus.getValue()) {
+                    if (corruptedShardIDMap.containsKey(shardStatus.getKey())
+                        && corruptedShardIDMap.get(shardStatus.getKey()).contains(status.getNode().getName())) {
                         assertThat(
-                            "shard [" + shardStatus.key + "] is failed on node [" + status.getNode().getName() + "]",
+                            "shard [" + shardStatus.getKey() + "] is failed on node [" + status.getNode().getName() + "]",
                             status.getStoreException(),
                             notNullValue()
                         );
                     } else {
                         assertNull(
-                            "shard [" + shardStatus.key + "] is not failed on node [" + status.getNode().getName() + "]",
+                            "shard [" + shardStatus.getKey() + "] is not failed on node [" + status.getNode().getName() + "]",
                             status.getStoreException()
                         );
                     }
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java
index e5a46c3c6bb9f..147be425f93b3 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.cluster;
 
-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.opensearch.cluster.block.ClusterBlock;
 import org.opensearch.cluster.block.ClusterBlocks;
 import org.opensearch.cluster.coordination.CoordinationMetadata;
@@ -347,16 +346,16 @@ private IndexRoutingTable randomIndexRoutingTable(String index, String[] nodeIds
      */
     private IndexRoutingTable randomChangeToIndexRoutingTable(IndexRoutingTable original, String[] nodes) {
         IndexRoutingTable.Builder builder = IndexRoutingTable.builder(original.getIndex());
-        for (ObjectCursor<IndexShardRoutingTable> indexShardRoutingTable : original.shards().values()) {
+        for (final IndexShardRoutingTable indexShardRoutingTable : original.shards().values()) {
             Set<String> availableNodes = Sets.newHashSet(nodes);
-            for (ShardRouting shardRouting : indexShardRoutingTable.value.shards()) {
+            for (ShardRouting shardRouting : indexShardRoutingTable.shards()) {
                 availableNodes.remove(shardRouting.currentNodeId());
                 if (shardRouting.relocating()) {
                     availableNodes.remove(shardRouting.relocatingNodeId());
                 }
             }
 
-            for (ShardRouting shardRouting : indexShardRoutingTable.value.shards()) {
+            for (ShardRouting shardRouting : indexShardRoutingTable.shards()) {
                 final ShardRouting updatedShardRouting = randomChange(shardRouting, availableNodes);
                 availableNodes.remove(updatedShardRouting.currentNodeId());
                 if (shardRouting.relocating()) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java
index fbcf5d3bc78f6..5c8fc82a1c2b5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.cluster.allocation;
 
-import com.carrotsearch.hppc.ObjectIntHashMap;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
@@ -53,7 +52,9 @@
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
@@ -126,11 +127,11 @@ public void testSimpleAwareness() throws Exception {
         assertThat("Some indices not closed", notClosedIndices, empty());
 
         // verify that we have all the primaries on node3
-        ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
+        final Map<String, Integer> counts = new HashMap<>();
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
+                    counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum);
                 }
             }
         }
@@ -182,12 +183,12 @@ public void testAwarenessZones() {
         assertThat(health.isTimedOut(), equalTo(false));
         ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
 
-        ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
+        final Map<String, Integer> counts = new HashMap<>();
 
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
+                    counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum);
                 }
             }
         }
@@ -232,12 +233,12 @@ public void testAwarenessZonesIncrementalNodes() {
             .actionGet();
         assertThat(health.isTimedOut(), equalTo(false));
         ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
-        ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
+        Map<String, Integer> counts = new HashMap<>();
 
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
+                    counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum);
                 }
             }
         }
@@ -272,12 +273,12 @@ public void testAwarenessZonesIncrementalNodes() {
         assertThat(health.isTimedOut(), equalTo(false));
         clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
-        counts = new ObjectIntHashMap<>();
+        counts = new HashMap<>();
 
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
+                    counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum);
                 }
             }
         }
@@ -312,12 +313,12 @@ public void testAwarenessZonesIncrementalNodes() {
         assertThat(health.isTimedOut(), equalTo(false));
         clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
-        counts = new ObjectIntHashMap<>();
+        counts = new HashMap<>();
 
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
+                    counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum);
                 }
             }
         }
@@ -347,12 +348,12 @@ public void testAwarenessZonesIncrementalNodes() {
         assertThat(health.isTimedOut(), equalTo(false));
         clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
-        counts = new ObjectIntHashMap<>();
+        counts = new HashMap<>();
 
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
+                    counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum);
                 }
             }
         }
@@ -411,12 +412,12 @@ public void testThreeZoneOneReplicaWithForceZoneValueAndLoadAwareness() throws E
         assertFalse(health.isTimedOut());
 
         ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
-        ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
+        final Map<String, Integer> counts = new HashMap<>();
 
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
+                    counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum);
                 }
             }
         }
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java
index 017f6fc29ef87..637cc96bdfc44 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.cluster.routing;
 
-import com.carrotsearch.hppc.cursors.IntObjectCursor;
 import org.opensearch.action.DocWriteResponse;
 import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder;
 import org.opensearch.action.admin.indices.shards.IndicesShardStoresResponse;
@@ -48,7 +47,6 @@ import org.opensearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Strings;
-import org.opensearch.common.collect.ImmutableOpenIntMap;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.set.Sets;
 import org.opensearch.common.xcontent.XContentType;
@@ -74,6 +72,7 @@
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
@@ -316,16 +315,16 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception {
         boolean useStaleReplica = randomBoolean(); // if true, use stale replica, otherwise a completely empty copy
         logger.info("--> explicitly promote old primary shard");
         final String idxName = "test";
-        ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> storeStatuses = client().admin()
+        final Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> storeStatuses = client().admin()
             .indices()
             .prepareShardStores(idxName)
             .get()
             .getStoreStatuses()
             .get(idxName);
         ClusterRerouteRequestBuilder rerouteBuilder = client().admin().cluster().prepareReroute();
-        for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : storeStatuses) {
-            int shardId = shardStoreStatuses.key;
-            IndicesShardStoresResponse.StoreStatus storeStatus = randomFrom(shardStoreStatuses.value);
+        for (var shardStoreStatuses : storeStatuses.entrySet()) {
+            int shardId = shardStoreStatuses.getKey();
+            IndicesShardStoresResponse.StoreStatus storeStatus = randomFrom(shardStoreStatuses.getValue());
             logger.info("--> adding allocation command for shard {}", shardId);
             // force allocation based on node id
             if (useStaleReplica) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
index 28753e495881f..d51e4bbff11b5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
@@ -31,7 +31,6 @@
 
 package org.opensearch.index.store;
 
-import com.carrotsearch.hppc.cursors.IntObjectCursor;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import org.apache.lucene.index.CheckIndex;
 import org.apache.lucene.index.SegmentCommitInfo;
@@ -674,9 +673,9 @@ public void testReplicaCorruption() throws Exception {
 
         final IndicesShardStoresResponse stores = client().admin().indices().prepareShardStores(index.getName()).get();
 
-        for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shards : stores.getStoreStatuses().get(index.getName())) {
-            for (IndicesShardStoresResponse.StoreStatus store : shards.value) {
-                final ShardId shardId = new ShardId(index, shards.key);
+        for (var shards : stores.getStoreStatuses().get(index.getName()).entrySet()) {
+            for (IndicesShardStoresResponse.StoreStatus store : shards.getValue()) {
+                final ShardId shardId = new ShardId(index, shards.getKey());
                 if (store.getAllocationStatus().equals(IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED)) {
                     for (Path path : findFilesToCorruptOnNode(store.getNode().getName(), shardId)) {
                         try (OutputStream os = Files.newOutputStream(path)) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java
index c5794c76e21de..d35a560b0986c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java
@@ -32,8 +32,6 @@
 
 package org.opensearch.search.aggregations;
 
-import com.carrotsearch.hppc.IntIntHashMap;
-import com.carrotsearch.hppc.IntIntMap;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
@@ -43,7 +41,9 @@
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.hamcrest.Matchers;
 
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.search.aggregations.AggregationBuilders.histogram;
@@ -67,7 +67,7 @@ public void testMultipleAggsOnSameField_WithDifferentRequiredValueSourceType() t
         createIndex("idx");
         IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)];
-        IntIntMap values = new IntIntHashMap();
+        final Map<Integer, Integer> values = new HashMap<>();
         long missingValues = 0;
         for (int i = 0; i < builders.length; i++) {
             String name = "name_" + randomIntBetween(1, 10);
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java
index ccd16ff9238bb..21f833d5430db 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.search.aggregations;
 
-import com.carrotsearch.hppc.IntHashSet;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
@@ -61,9 +60,11 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.function.Function;
 
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -230,7 +231,7 @@ public void testDuelTerms() throws Exception {
         final int numDocs = scaledRandomIntBetween(1000, 2000);
         final int maxNumTerms = randomIntBetween(10, 5000);
 
-        final IntHashSet valuesSet = new IntHashSet();
+        final Set<Integer> valuesSet = new HashSet<>();
         cluster().wipeIndices("idx");
         prepareCreate("idx").setMapping(
             jsonBuilder().startObject()
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java
index dae788abe0d10..dd495701c3ddb 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java
@@ -31,7 +31,6 @@
 
 package org.opensearch.search.aggregations.bucket;
 
-import com.carrotsearch.hppc.LongHashSet;
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchPhaseExecutionException;
@@ -61,8 +60,10 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.function.Function;
 
 import static java.util.Collections.emptyMap;
@@ -396,7 +397,7 @@ public void testSingleValuedFieldOrderedByCountAsc() throws Exception {
         assertThat(histo.getName(), equalTo("histo"));
         assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
 
-        LongHashSet buckets = new LongHashSet();
+        final Set<Long> buckets = new HashSet<>();
         List<Histogram.Bucket> histoBuckets = new ArrayList<>(histo.getBuckets());
         long previousCount = Long.MIN_VALUE;
         for (int i = 0; i < numValueBuckets; ++i) {
@@ -423,7 +424,7 @@ public void testSingleValuedFieldOrderedByCountDesc() throws Exception {
         assertThat(histo.getName(), equalTo("histo"));
         assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
 
-        LongHashSet buckets = new LongHashSet();
+        final Set<Long> buckets = new HashSet<>();
         List<Histogram.Bucket> histoBuckets = new ArrayList<>(histo.getBuckets());
         long previousCount = Long.MAX_VALUE;
         for (int i = 0; i < numValueBuckets; ++i) {
@@ -497,7 +498,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception {
         assertThat(histo.getName(), equalTo("histo"));
         assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
 
-        LongHashSet visited = new LongHashSet();
+        final Set<Long> visited = new HashSet<>();
         double previousSum = Double.NEGATIVE_INFINITY;
         List<Histogram.Bucket> buckets = new ArrayList<>(histo.getBuckets());
         for (int i = 0; i < numValueBuckets; ++i) {
@@ -539,7 +540,7 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception
         assertThat(histo.getName(), equalTo("histo"));
         assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
 
-        LongHashSet visited = new LongHashSet();
+        final Set<Long> visited = new HashSet<>();
         double previousSum = Double.POSITIVE_INFINITY;
         List<Histogram.Bucket> buckets = new ArrayList<>(histo.getBuckets());
         for (int i = 0; i < numValueBuckets; ++i) {
@@ -581,7 +582,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws
         assertThat(histo.getName(), equalTo("histo"));
         assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
 
-        LongHashSet visited = new LongHashSet();
+        final Set<Long> visited = new HashSet<>();
         double previousSum = Double.POSITIVE_INFINITY;
 
         List<Histogram.Bucket> buckets = new ArrayList<>(histo.getBuckets());
@@ -625,7 +626,7 @@ public void testSingleValuedFieldOrderedBySubAggregationDescDeepOrderPath() thro
         assertThat(histo.getName(), equalTo("histo"));
         assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
 
-        LongHashSet visited = new LongHashSet();
+        final Set<Long> visited = new HashSet<>();
         double prevMax = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
         List<Histogram.Bucket> buckets = new ArrayList<>(histo.getBuckets());
         for (int i = 0; i < numValueBuckets; ++i) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java
index 08e696245209e..48e2a3d8fa9e1 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java
@@ -32,8 +32,6 @@
 
 package org.opensearch.search.aggregations.bucket;
 
-import com.carrotsearch.hppc.LongHashSet;
-import com.carrotsearch.hppc.LongSet;
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchRequest;
@@ -122,7 +120,7 @@ public void setupSuiteScopeCluster() throws Exception {
         cardinality = randomIntBetween(8, 30);
         final List<IndexRequestBuilder> indexRequests = new ArrayList<>();
         final Set<String> stringTerms = new HashSet<>();
-        final LongSet longTerms = new LongHashSet();
+        final Set<Long> longTerms = new HashSet<>();
         for (int i = 0; i < cardinality; ++i) {
             String stringTerm;
             do {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java
index 19cf1ee3a0ee7..e0a54e9b4fc36 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.search.scroll;
 
-import com.carrotsearch.hppc.IntHashSet;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
 import org.opensearch.action.index.IndexRequestBuilder;
@@ -49,6 +48,8 @@
 import org.opensearch.test.OpenSearchIntegTestCase;
 
 import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
 
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@@ -145,7 +146,7 @@ private TestContext create(SearchType... searchTypes) throws Exception {
             boolean unevenRouting = randomBoolean();
 
             int numMissingDocs = scaledRandomIntBetween(0, numDocs / 100);
-            IntHashSet missingDocs = new IntHashSet(numMissingDocs);
+            final Set<Integer> missingDocs = new HashSet<>(numMissingDocs);
             for (int i = 0; i < numMissingDocs; i++) {
                 while (!missingDocs.add(randomInt(numDocs))) {
                 }
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index 179cef372fb82..f0610f101432d 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -32,9 +32,6 @@
 
 package org.opensearch.snapshots;
 
-import com.carrotsearch.hppc.IntHashSet;
-import com.carrotsearch.hppc.IntSet;
-
 import org.opensearch.Version;
 import org.opensearch.action.ActionFuture;
 import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
@@ -115,8 +112,10 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -738,7 +737,7 @@ public boolean clearData(String nodeName) {
 
         ensureGreen("test-idx");
 
-        IntSet reusedShards = new IntHashSet();
+        final Set<Integer> reusedShards = new HashSet<>();
         List<RecoveryState> recoveryStates = client().admin()
             .indices()
             .prepareRecoveries("test-idx")
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
index 0a4b98f9d4bb5..61f2a6301a2dd 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
@@ -58,7 +58,6 @@
 import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Priority;
-import org.opensearch.common.collect.ImmutableOpenIntMap;
 import org.opensearch.common.inject.Inject;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.Strings;
@@ -152,11 +151,11 @@ private void verifyThenSubmitUpdate(
             IndicesShardStoresAction.NAME,
             new IndicesShardStoresRequest().indices(stalePrimaryAllocations.keySet().toArray(Strings.EMPTY_ARRAY)),
             new ActionListenerResponseHandler<>(ActionListener.wrap(response -> {
-                final Map<String, ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>>> status = response.getStoreStatuses();
+                final Map<String, Map<Integer, List<IndicesShardStoresResponse.StoreStatus>>> status = response.getStoreStatuses();
                 Exception e = null;
                 for (Map.Entry<String, List<AbstractAllocateAllocationCommand>> entry : stalePrimaryAllocations.entrySet()) {
                     final String index = entry.getKey();
-                    final ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> indexStatus = status.get(index);
+                    final Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> indexStatus = status.get(index);
                     if (indexStatus == null) {
                         // The index in the stale primary allocation request was green and hence filtered out by the store status
                         // request. We ignore it here since the relevant exception will be thrown by the reroute action later on.
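The same rewrite applies one level deeper in the shard-store plumbing above: the response shape moves from Map<String, ImmutableOpenIntMap<List<StoreStatus>>> to Map<String, Map<Integer, List<StoreStatus>>>, and IntObjectCursor loops become Map.Entry loops. A compilable sketch of walking that nested shape (String stands in for StoreStatus here, and the class name and sample data are made up, not part of the patch):

import java.util.List;
import java.util.Map;

public final class StoreStatusWalkSketch {
    // Walks statuses per index, then per shard id, mirroring the rewritten loops above.
    static int countStores(Map<String, Map<Integer, List<String>>> statusesByIndex) {
        int total = 0;
        for (Map.Entry<String, Map<Integer, List<String>>> index : statusesByIndex.entrySet()) {
            for (Map.Entry<Integer, List<String>> shard : index.getValue().entrySet()) {
                // hppc (before): IntObjectCursor<List<StoreStatus>> cursor; cursor.key / cursor.value
                total += shard.getValue().size();
            }
        }
        return total;
    }

    public static void main(String[] args) {
        System.out.println(countStores(Map.of("test-idx", Map.of(0, List.of("node-1", "node-2"))))); // prints 2
    }
}

Map.of and List.of return immutable views, which also covers the defensive-copy role that ImmutableOpenIntMap used to play in these response objects.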
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java index 02a516fffbfd3..63ac76ae65783 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -32,8 +32,6 @@ package org.opensearch.action.admin.cluster.stats; -import com.carrotsearch.hppc.ObjectObjectHashMap; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -45,7 +43,9 @@ import org.opensearch.search.suggest.completion.CompletionStats; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; /** * Cluster Stats per index @@ -66,7 +66,7 @@ public class ClusterStatsIndices implements ToXContentFragment { private MappingStats mappings; public ClusterStatsIndices(List nodeResponses, MappingStats mappingStats, AnalysisStats analysisStats) { - ObjectObjectHashMap countsPerIndex = new ObjectObjectHashMap<>(); + Map countsPerIndex = new HashMap<>(); this.docs = new DocsStats(); this.store = new StoreStats(); @@ -101,8 +101,8 @@ public ClusterStatsIndices(List nodeResponses, Mapping shards = new ShardStats(); indexCount = countsPerIndex.size(); - for (ObjectObjectCursor indexCountsCursor : countsPerIndex) { - shards.addIndexShardCount(indexCountsCursor.value); + for (final ShardStats indexCountsCursor : countsPerIndex.values()) { + shards.addIndexShardCount(indexCountsCursor); } this.mappings = mappingStats; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java index 25a915833c7e2..699884ca0eab3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -32,8 +32,6 @@ package org.opensearch.action.admin.cluster.stats; -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.cursors.ObjectIntCursor; import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; @@ -289,16 +287,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static class OsStats implements ToXContentFragment { final int availableProcessors; final int allocatedProcessors; - final ObjectIntHashMap names; - final ObjectIntHashMap prettyNames; + final Map names; + final Map prettyNames; final org.opensearch.monitor.os.OsStats.Mem mem; /** * Build the stats from information about each node. 
*/ private OsStats(List nodeInfos, List nodeStatsList) { - this.names = new ObjectIntHashMap<>(); - this.prettyNames = new ObjectIntHashMap<>(); + final Map names = new HashMap<>(nodeInfos.size()); + final Map prettyNames = new HashMap<>(nodeInfos.size()); int availableProcessors = 0; int allocatedProcessors = 0; for (NodeInfo nodeInfo : nodeInfos) { @@ -306,12 +304,14 @@ private OsStats(List nodeInfos, List nodeStatsList) { allocatedProcessors += nodeInfo.getInfo(OsInfo.class).getAllocatedProcessors(); if (nodeInfo.getInfo(OsInfo.class).getName() != null) { - names.addTo(nodeInfo.getInfo(OsInfo.class).getName(), 1); + names.merge(nodeInfo.getInfo(OsInfo.class).getName(), 1, Integer::sum); } if (nodeInfo.getInfo(OsInfo.class).getPrettyName() != null) { - prettyNames.addTo(nodeInfo.getInfo(OsInfo.class).getPrettyName(), 1); + prettyNames.merge(nodeInfo.getInfo(OsInfo.class).getPrettyName(), 1, Integer::sum); } } + this.names = Collections.unmodifiableMap(names); + this.prettyNames = Collections.unmodifiableMap(prettyNames); this.availableProcessors = availableProcessors; this.allocatedProcessors = allocatedProcessors; @@ -365,11 +365,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.ALLOCATED_PROCESSORS, allocatedProcessors); builder.startArray(Fields.NAMES); { - for (ObjectIntCursor name : names) { + for (final Map.Entry name : names.entrySet()) { builder.startObject(); { - builder.field(Fields.NAME, name.key); - builder.field(Fields.COUNT, name.value); + builder.field(Fields.NAME, name.getKey()); + builder.field(Fields.COUNT, name.getValue()); } builder.endObject(); } @@ -377,11 +377,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endArray(); builder.startArray(Fields.PRETTY_NAMES); { - for (final ObjectIntCursor prettyName : prettyNames) { + for (final Map.Entry prettyName : prettyNames.entrySet()) { builder.startObject(); { - builder.field(Fields.PRETTY_NAME, prettyName.key); - builder.field(Fields.COUNT, prettyName.value); + builder.field(Fields.PRETTY_NAME, prettyName.getKey()); + builder.field(Fields.COUNT, prettyName.getValue()); } builder.endObject(); } @@ -502,7 +502,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws */ public static class JvmStats implements ToXContentFragment { - private final ObjectIntHashMap versions; + private final Map versions; private final long threads; private final long maxUptime; private final long heapUsed; @@ -512,15 +512,15 @@ public static class JvmStats implements ToXContentFragment { * Build from lists of information about each node. 
*/ private JvmStats(List nodeInfos, List nodeStatsList) { - this.versions = new ObjectIntHashMap<>(); + final Map versions = new HashMap<>(nodeInfos.size()); long threads = 0; long maxUptime = 0; long heapMax = 0; long heapUsed = 0; for (NodeInfo nodeInfo : nodeInfos) { - versions.addTo(new JvmVersion(nodeInfo.getInfo(JvmInfo.class)), 1); + versions.merge(new JvmVersion(nodeInfo.getInfo(JvmInfo.class)), 1, Integer::sum); } - + this.versions = Collections.unmodifiableMap(versions); for (NodeStats nodeStats : nodeStatsList) { org.opensearch.monitor.jvm.JvmStats js = nodeStats.getJvm(); if (js == null) { @@ -541,7 +541,7 @@ private JvmStats(List nodeInfos, List nodeStatsList) { this.heapMax = heapMax; } - public ObjectIntHashMap getVersions() { + public Map getVersions() { return versions; } @@ -601,15 +601,15 @@ static final class Fields { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.humanReadableField(Fields.MAX_UPTIME_IN_MILLIS, Fields.MAX_UPTIME, new TimeValue(maxUptime)); builder.startArray(Fields.VERSIONS); - for (ObjectIntCursor v : versions) { + for (final Map.Entry v : versions.entrySet()) { builder.startObject(); - builder.field(Fields.VERSION, v.key.version); - builder.field(Fields.VM_NAME, v.key.vmName); - builder.field(Fields.VM_VERSION, v.key.vmVersion); - builder.field(Fields.VM_VENDOR, v.key.vmVendor); - builder.field(Fields.BUNDLED_JDK, v.key.bundledJdk); - builder.field(Fields.USING_BUNDLED_JDK, v.key.usingBundledJdk); - builder.field(Fields.COUNT, v.value); + builder.field(Fields.VERSION, v.getKey().version); + builder.field(Fields.VM_NAME, v.getKey().vmName); + builder.field(Fields.VM_VERSION, v.getKey().vmVersion); + builder.field(Fields.VM_VENDOR, v.getKey().vmVendor); + builder.field(Fields.BUNDLED_JDK, v.getKey().bundledJdk); + builder.field(Fields.USING_BUNDLED_JDK, v.getKey().usingBundledJdk); + builder.field(Fields.COUNT, v.getValue()); builder.endObject(); } builder.endArray(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 17785a98cd027..df15a961d2a06 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.mapping.put; -import com.carrotsearch.hppc.ObjectHashSet; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.Version; @@ -62,6 +61,7 @@ import java.util.Arrays; import java.util.Map; import java.util.Objects; +import java.util.Set; import static org.opensearch.action.ValidateActions.addValidationError; @@ -80,7 +80,7 @@ */ public class PutMappingRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable, ToXContentObject { - private static ObjectHashSet RESERVED_FIELDS = ObjectHashSet.from( + private static final Set RESERVED_FIELDS = Set.of( "_uid", "_id", "_type", diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java index a17adca03e121..d4d747ad65fc8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -32,13 +32,11 @@ package org.opensearch.action.admin.indices.shards; -import com.carrotsearch.hppc.cursors.IntObjectCursor; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.collect.ImmutableOpenIntMap; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -281,10 +279,10 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t } } - private final Map>> storeStatuses; + private final Map>> storeStatuses; private final List failures; - public IndicesShardStoresResponse(final Map>> storeStatuses, List failures) { + public IndicesShardStoresResponse(final Map>> storeStatuses, List failures) { this.storeStatuses = Collections.unmodifiableMap(storeStatuses); this.failures = failures; } @@ -295,14 +293,11 @@ public IndicesShardStoresResponse(final Map { - int indexEntries = i.readVInt(); - ImmutableOpenIntMap.Builder> shardEntries = ImmutableOpenIntMap.builder(); - for (int shardCount = 0; shardCount < indexEntries; shardCount++) { - shardEntries.put(i.readInt(), i.readList(StoreStatus::new)); - } - return shardEntries.build(); - }); + final Map>> storeStatuses = in.readMap( + StreamInput::readString, + i -> i.readMap(StreamInput::readInt, j -> j.readList(StoreStatus::new)) + ); + this.storeStatuses = Collections.unmodifiableMap(storeStatuses); failures = Collections.unmodifiableList(in.readList(Failure::readFailure)); } @@ -310,7 +305,7 @@ public IndicesShardStoresResponse(StreamInput in) throws IOException { * Returns {@link StoreStatus}s * grouped by their index names and shard ids. 
*/ - public Map>> getStoreStatuses() { + public Map>> getStoreStatuses() { return storeStatuses; } @@ -324,13 +319,11 @@ public List getFailures() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(storeStatuses, StreamOutput::writeString, (o, v) -> { - o.writeVInt(v.size()); - for (IntObjectCursor> shardStatusesEntry : v) { - o.writeInt(shardStatusesEntry.key); - o.writeCollection(shardStatusesEntry.value); - } - }); + out.writeMap( + storeStatuses, + StreamOutput::writeString, + (o, v) -> o.writeMap(v, StreamOutput::writeInt, StreamOutput::writeCollection) + ); out.writeList(failures); } @@ -345,14 +338,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.startObject(Fields.INDICES); - for (final Map.Entry>> indexShards : storeStatuses.entrySet()) { + for (final Map.Entry>> indexShards : storeStatuses.entrySet()) { builder.startObject(indexShards.getKey()); builder.startObject(Fields.SHARDS); - for (IntObjectCursor> shardStatusesEntry : indexShards.getValue()) { - builder.startObject(String.valueOf(shardStatusesEntry.key)); + for (final Map.Entry> shardStatusesEntry : indexShards.getValue().entrySet()) { + builder.startObject(String.valueOf(shardStatusesEntry.getKey())); builder.startArray(Fields.STORES); - for (StoreStatus storeStatus : shardStatusesEntry.value) { + for (StoreStatus storeStatus : shardStatusesEntry.getValue()) { builder.startObject(); storeStatus.toXContent(builder, params); builder.endObject(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index cbb5c3d686cba..c1e6a64efbf2f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -53,7 +53,6 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.collect.ImmutableOpenIntMap; import org.opensearch.common.collect.Tuple; import org.opensearch.common.inject.Inject; import org.opensearch.core.common.io.stream.StreamInput; @@ -231,18 +230,17 @@ protected synchronized void processAsyncFetch( } void finish() { - final Map>> indicesStoreStatusesBuilder = - new HashMap<>(); + final Map>> indicesStoreStatusesBuilder = new HashMap<>(); java.util.List failureBuilder = new ArrayList<>(); for (Response fetchResponse : fetchResponses) { - ImmutableOpenIntMap> indexStoreStatuses = + final Map> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndexName()); - final ImmutableOpenIntMap.Builder> indexShardsBuilder; + final Map> indexShardsBuilder; if (indexStoreStatuses == null) { - indexShardsBuilder = ImmutableOpenIntMap.builder(); + indexShardsBuilder = new HashMap<>(); } else { - indexShardsBuilder = ImmutableOpenIntMap.builder(indexStoreStatuses); + indexShardsBuilder = new HashMap<>(indexStoreStatuses); } java.util.List storeStatuses = indexShardsBuilder.get( fetchResponse.shardId.id() @@ -269,7 +267,7 @@ void finish() { } CollectionUtil.timSort(storeStatuses); indexShardsBuilder.put(fetchResponse.shardId.id(), storeStatuses); - indicesStoreStatusesBuilder.put(fetchResponse.shardId.getIndexName(), indexShardsBuilder.build()); + 
indicesStoreStatusesBuilder.put(fetchResponse.shardId.getIndexName(), Collections.unmodifiableMap(indexShardsBuilder)); for (FailedNodeException failure : fetchResponse.failures) { failureBuilder.add( new IndicesShardStoresResponse.Failure( diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java index c1df2fa913885..512d3295c4cfc 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java @@ -32,8 +32,6 @@ package org.opensearch.action.search; -import com.carrotsearch.hppc.ObjectObjectHashMap; - import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.FieldDoc; @@ -48,7 +46,6 @@ import org.apache.lucene.search.TotalHits.Relation; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.collect.HppcMaps; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.search.DocValueFormat; @@ -105,8 +102,8 @@ public SearchPhaseController( } public AggregatedDfs aggregateDfs(Collection results) { - ObjectObjectHashMap termStatistics = HppcMaps.newNoNullKeysMap(); - ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(); + final Map termStatistics = new HashMap<>(); + final Map fieldStatistics = new HashMap<>(); long aggMaxDoc = 0; for (DfsSearchResult lEntry : results) { final Term[] terms = lEntry.terms(); @@ -135,29 +132,25 @@ public AggregatedDfs aggregateDfs(Collection results) { } assert !lEntry.fieldStatistics().containsKey(null); - final Object[] keys = lEntry.fieldStatistics().keys; - final Object[] values = lEntry.fieldStatistics().values; - for (int i = 0; i < keys.length; i++) { - if (keys[i] != null) { - String key = (String) keys[i]; - CollectionStatistics value = (CollectionStatistics) values[i]; - if (value == null) { - continue; - } - assert key != null; - CollectionStatistics existing = fieldStatistics.get(key); - if (existing != null) { - CollectionStatistics merged = new CollectionStatistics( - key, - existing.maxDoc() + value.maxDoc(), - existing.docCount() + value.docCount(), - existing.sumTotalTermFreq() + value.sumTotalTermFreq(), - existing.sumDocFreq() + value.sumDocFreq() - ); - fieldStatistics.put(key, merged); - } else { - fieldStatistics.put(key, value); - } + for (var entry : lEntry.fieldStatistics().entrySet()) { + String key = entry.getKey(); + CollectionStatistics value = entry.getValue(); + if (value == null) { + continue; + } + assert key != null; + CollectionStatistics existing = fieldStatistics.get(key); + if (existing != null) { + CollectionStatistics merged = new CollectionStatistics( + key, + existing.maxDoc() + value.maxDoc(), + existing.docCount() + value.docCount(), + existing.sumTotalTermFreq() + value.sumTotalTermFreq(), + existing.sumDocFreq() + value.sumDocFreq() + ); + fieldStatistics.put(key, merged); + } else { + fieldStatistics.put(key, value); } } aggMaxDoc += lEntry.maxDoc(); diff --git a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java index 3a687f946f08b..15275ba48fc6e 100644 --- a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java +++ 
b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java @@ -32,8 +32,6 @@ package org.opensearch.action.support; -import com.carrotsearch.hppc.cursors.IntObjectCursor; - import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexRoutingTable; @@ -186,8 +184,8 @@ public boolean enoughShardsActive(final ClusterState clusterState, final String. if (waitForActiveShards == ActiveShardCount.DEFAULT) { waitForActiveShards = SETTING_WAIT_FOR_ACTIVE_SHARDS.get(indexMetadata.getSettings()); } - for (final IntObjectCursor shardRouting : indexRoutingTable.getShards()) { - if (waitForActiveShards.enoughShardsActive(shardRouting.value) == false) { + for (final IndexShardRoutingTable shardRouting : indexRoutingTable.getShards().values()) { + if (waitForActiveShards.enoughShardsActive(shardRouting) == false) { // not enough active shard copies yet return false; } diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportBroadcastReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportBroadcastReplicationAction.java index 41f05ade7fdf0..116b46469df21 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportBroadcastReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.support.replication; -import com.carrotsearch.hppc.cursors.IntObjectCursor; import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; @@ -154,11 +153,12 @@ protected List shards(Request request, ClusterState clusterState) { for (String index : concreteIndices) { IndexMetadata indexMetadata = clusterState.metadata().getIndices().get(index); if (indexMetadata != null) { - for (IntObjectCursor shardRouting : clusterState.getRoutingTable() + for (IndexShardRoutingTable shardRouting : clusterState.getRoutingTable() .indicesRouting() .get(index) - .getShards()) { - shardIds.add(shardRouting.value.shardId()); + .getShards() + .values()) { + shardIds.add(shardRouting.shardId()); } } } diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFields.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFields.java index ff7a8de3a97d3..ed3dca815090d 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFields.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFields.java @@ -32,8 +32,6 @@ package org.opensearch.action.termvectors; -import com.carrotsearch.hppc.ObjectLongHashMap; -import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.apache.lucene.index.BaseTermsEnum; import org.apache.lucene.index.Fields; import org.apache.lucene.index.ImpactsEnum; @@ -51,7 +49,10 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; +import java.util.Map; import static org.apache.lucene.util.ArrayUtil.grow; @@ -135,7 +136,7 @@ */ public final class TermVectorsFields extends Fields { - private final ObjectLongHashMap fieldMap; + private final Map fieldMap; private final BytesReference termVectors; final boolean hasTermStatistic; final boolean hasFieldStatistic; @@ -157,10 +158,11 @@ public TermVectorsFields(BytesReference headerRef, BytesReference termVectors) t hasFieldStatistic = 
header.readBoolean(); hasScores = header.readBoolean(); final int numFields = header.readVInt(); - fieldMap = new ObjectLongHashMap<>(numFields); + final Map fieldMap = new HashMap<>(numFields); for (int i = 0; i < numFields; i++) { fieldMap.put((header.readString()), header.readVLong()); } + this.fieldMap = Collections.unmodifiableMap(fieldMap); } // reference to the term vector data this.termVectors = termVectors; @@ -168,8 +170,8 @@ public TermVectorsFields(BytesReference headerRef, BytesReference termVectors) t @Override public Iterator iterator() { - final Iterator> iterator = fieldMap.iterator(); - return new Iterator() { + final Iterator> iterator = fieldMap.entrySet().iterator(); + return new Iterator<>() { @Override public boolean hasNext() { return iterator.hasNext(); @@ -177,7 +179,7 @@ public boolean hasNext() { @Override public String next() { - return iterator.next().key; + return iterator.next().getKey(); } @Override @@ -191,12 +193,11 @@ public void remove() { public Terms terms(String field) throws IOException { // first, find where in the termVectors bytes the actual term vector for // this field is stored - final int keySlot = fieldMap.indexOf(field); - if (keySlot < 0) { + final Long keySlot = fieldMap.get(field); + if (keySlot == null) { return null; // we don't have it. } - long readOffset = fieldMap.indexGet(keySlot); - return new TermVector(termVectors, readOffset); + return new TermVector(termVectors, keySlot); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 892b88da3c85d..1acdf76c27172 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -32,8 +32,6 @@ package org.opensearch.cluster; -import com.carrotsearch.hppc.ObjectHashSet; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ -46,8 +44,10 @@ import java.io.IOException; import java.util.Collections; +import java.util.HashSet; import java.util.Map; import java.util.Objects; +import java.util.Set; /** * ClusterInfo is an object representing a map of nodes to {@link DiskUsage} @@ -278,31 +278,32 @@ public void writeTo(StreamOutput out) throws IOException { */ public static class ReservedSpace implements Writeable { - public static final ReservedSpace EMPTY = new ReservedSpace(0, new ObjectHashSet<>()); + public static final ReservedSpace EMPTY = new ReservedSpace(0, new HashSet<>()); private final long total; - private final ObjectHashSet shardIds; + private final Set shardIds; - private ReservedSpace(long total, ObjectHashSet shardIds) { + private ReservedSpace(long total, Set shardIds) { this.total = total; - this.shardIds = shardIds; + this.shardIds = Collections.unmodifiableSet(shardIds); } ReservedSpace(StreamInput in) throws IOException { total = in.readVLong(); final int shardIdCount = in.readVInt(); - shardIds = new ObjectHashSet<>(shardIdCount); + Set shardIds = new HashSet<>(shardIdCount); for (int i = 0; i < shardIdCount; i++) { shardIds.add(new ShardId(in)); } + this.shardIds = Collections.unmodifiableSet(shardIds); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(total); out.writeVInt(shardIds.size()); - for (ObjectCursor shardIdCursor : shardIds) { - shardIdCursor.value.writeTo(out); + for (final ShardId 
shardIdCursor : shardIds) { + shardIdCursor.writeTo(out); } } @@ -331,8 +332,8 @@ void toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("total", total); builder.startArray("shards"); { - for (ObjectCursor shardIdCursor : shardIds) { - shardIdCursor.value.toXContent(builder, params); + for (final ShardId shardIdCursor : shardIds) { + shardIdCursor.toXContent(builder, params); } } builder.endArray(); // end "shards" @@ -345,7 +346,7 @@ void toXContent(XContentBuilder builder, Params params) throws IOException { */ public static class Builder { private long total; - private ObjectHashSet shardIds = new ObjectHashSet<>(); + private Set shardIds = new HashSet<>(); public ReservedSpace build() { assert shardIds != null : "already built"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 2c4559c0ca860..5f3731a6172ac 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import com.carrotsearch.hppc.LongArrayList; import org.opensearch.core.Assertions; import org.opensearch.LegacyESVersion; import org.opensearch.Version; @@ -73,6 +72,7 @@ import java.time.Instant; import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; @@ -1820,7 +1820,7 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti } } } else if (KEY_PRIMARY_TERMS.equals(currentFieldName)) { - LongArrayList list = new LongArrayList(); + final List list = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_NUMBER) { list.add(parser.longValue()); @@ -1828,7 +1828,7 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti throw new IllegalStateException("found a non-numeric value under [" + KEY_PRIMARY_TERMS + "]"); } } - builder.primaryTerms(list.toArray()); + builder.primaryTerms(list.stream().mapToLong(i -> i).toArray()); } else { throw new IllegalArgumentException("Unexpected field for an array " + currentFieldName); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java index a38c2b81aec71..f995473162b4a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.IntObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -71,7 +70,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.UUIDs; -import org.opensearch.common.collect.ImmutableOpenIntMap; import org.opensearch.common.collect.Tuple; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Setting; @@ -82,11 +80,11 @@ import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.core.common.Strings; import org.opensearch.core.index.Index; -import 
org.opensearch.index.IndexNotFoundException; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexNotFoundException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; -import org.opensearch.core.rest.RestStatus; import org.opensearch.snapshots.RestoreService; import org.opensearch.snapshots.SnapshotInProgressException; import org.opensearch.snapshots.SnapshotsService; @@ -639,12 +637,12 @@ private void waitForShardsReadyForClosing( return; } - final ImmutableOpenIntMap shards = indexRoutingTable.getShards(); + final Map shards = indexRoutingTable.getShards(); final AtomicArray results = new AtomicArray<>(shards.size()); final CountDown countDown = new CountDown(shards.size()); - for (IntObjectCursor shard : shards) { - final IndexShardRoutingTable shardRoutingTable = shard.value; + for (final IndexShardRoutingTable shard : shards.values()) { + final IndexShardRoutingTable shardRoutingTable = shard; final int shardId = shardRoutingTable.shardId().id(); sendVerifyShardBeforeCloseRequest(shardRoutingTable, closingBlock, new NotifyOnceListener() { @Override @@ -772,12 +770,12 @@ private void waitForShardsReady( return; } - final ImmutableOpenIntMap shards = indexRoutingTable.getShards(); + final Map shards = indexRoutingTable.getShards(); final AtomicArray results = new AtomicArray<>(shards.size()); final CountDown countDown = new CountDown(shards.size()); - for (IntObjectCursor shard : shards) { - final IndexShardRoutingTable shardRoutingTable = shard.value; + for (final IndexShardRoutingTable shard : shards.values()) { + final IndexShardRoutingTable shardRoutingTable = shard; final int shardId = shardRoutingTable.shardId().id(); sendVerifyShardBlockRequest(shardRoutingTable, clusterBlock, new NotifyOnceListener() { @Override diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index 224cd53126145..af348c1c98f2d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -32,9 +32,6 @@ package org.opensearch.cluster.routing; -import com.carrotsearch.hppc.IntSet; -import com.carrotsearch.hppc.cursors.IntCursor; -import com.carrotsearch.hppc.cursors.IntObjectCursor; import org.apache.lucene.util.CollectionUtil; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; @@ -47,7 +44,6 @@ import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; import org.opensearch.cluster.routing.RecoverySource.RemoteStoreRecoverySource; import org.opensearch.common.Randomness; -import org.opensearch.common.collect.ImmutableOpenIntMap; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.Index; @@ -56,9 +52,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.function.Predicate; @@ -86,17 +84,17 @@ public class IndexRoutingTable extends AbstractDiffable imple // note, we assume that when the index routing is created, ShardRoutings are created for all possible number of // shards with state set to UNASSIGNED - private final 
ImmutableOpenIntMap shards; + private final Map shards; private final List allActiveShards; - IndexRoutingTable(Index index, ImmutableOpenIntMap shards) { + IndexRoutingTable(Index index, final Map shards) { this.index = index; this.shuffler = new RotationShardShuffler(Randomness.get().nextInt()); - this.shards = shards; + this.shards = Collections.unmodifiableMap(shards); List allActiveShards = new ArrayList<>(); - for (IntObjectCursor cursor : shards) { - for (ShardRouting shardRouting : cursor.value) { + for (IndexShardRoutingTable cursor : shards.values()) { + for (ShardRouting shardRouting : cursor) { if (shardRouting.active()) { allActiveShards.add(shardRouting); } @@ -197,7 +195,7 @@ boolean validate(Metadata metadata) { @Override public Iterator iterator() { - return shards.valuesIt(); + return shards.values().iterator(); } /** @@ -232,11 +230,11 @@ public int numberOfNodesShardsAreAllocatedOn(String... excludedNodes) { return nodes.size(); } - public ImmutableOpenIntMap shards() { + public Map shards() { return shards; } - public ImmutableOpenIntMap getShards() { + public Map getShards() { return shards(); } @@ -374,7 +372,7 @@ public static Builder builder(Index index) { public static class Builder { private final Index index; - private final ImmutableOpenIntMap.Builder shards = ImmutableOpenIntMap.builder(); + private final Map shards = new HashMap<>(); public Builder(Index index) { this.index = index; @@ -418,7 +416,11 @@ public Builder initializeAsFromOpenToClose(IndexMetadata indexMetadata) { /** * Initializes a new empty index, to be restored from a snapshot */ - public Builder initializeAsNewRestore(IndexMetadata indexMetadata, SnapshotRecoverySource recoverySource, IntSet ignoreShards) { + public Builder initializeAsNewRestore( + IndexMetadata indexMetadata, + SnapshotRecoverySource recoverySource, + final Set ignoreShards + ) { final UnassignedInfo unassignedInfo = new UnassignedInfo( UnassignedInfo.Reason.NEW_INDEX_RESTORED, "restore_source[" @@ -454,7 +456,7 @@ public Builder initializeAsRemoteStoreRestore(IndexMetadata indexMetadata, Remot "restore_source[remote_store]" ); assert indexMetadata.getIndex().equals(index); - if (!shards.isEmpty()) { + if (shards.isEmpty() == false) { throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); } for (int shardNumber = 0; shardNumber < indexMetadata.getNumberOfShards(); shardNumber++) { @@ -472,7 +474,7 @@ public Builder initializeAsRemoteStoreRestore(IndexMetadata indexMetadata, Remot private Builder initializeAsRestore( IndexMetadata indexMetadata, RecoverySource recoverySource, - IntSet ignoreShards, + final Set ignoreShards, boolean asNew, UnassignedInfo unassignedInfo ) { @@ -550,8 +552,7 @@ private Builder initializeEmpty(IndexMetadata indexMetadata, UnassignedInfo unas } public Builder addReplica() { - for (IntCursor cursor : shards.keys()) { - int shardNumber = cursor.value; + for (final int shardNumber : shards.keySet()) { ShardId shardId = new ShardId(index, shardNumber); // version 0, will get updated when reroute will happen ShardRouting shard = ShardRouting.newUnassigned( @@ -566,8 +567,7 @@ public Builder addReplica() { } public Builder removeReplica() { - for (IntCursor cursor : shards.keys()) { - int shardId = cursor.value; + for (final int shardId : shards.keySet()) { IndexShardRoutingTable indexShard = shards.get(shardId); if (indexShard.replicaShards().isEmpty()) { // nothing to do here! 
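The IndexRoutingTable change above is the template for every ImmutableOpenIntMap removal in this patch: the builder mutates a plain HashMap, and the constructor reimposes immutability exactly once via Collections.unmodifiableMap. A hedged sketch of that construction pattern, with placeholder types standing in for IndexShardRoutingTable and its builder:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

final class ShardTable {
    private final Map<Integer, String> shards;

    ShardTable(final Map<Integer, String> shards) {
        // Wrap once at construction; every caller of shards() gets a read-only view.
        this.shards = Collections.unmodifiableMap(shards);
    }

    Map<Integer, String> shards() {
        return shards;
    }

    static final class Builder {
        private final Map<Integer, String> shards = new HashMap<>();

        Builder put(final int shardId, final String routing) {
            shards.put(shardId, routing); // freely mutable while building
            return this;
        }

        ShardTable build() {
            // Handing the map straight to the constructor is safe only if the
            // builder is discarded afterwards; copy defensively if it may be reused.
            return new ShardTable(shards);
        }
    }
}

Unlike ImmutableOpenIntMap.Builder, which nulled out its backing map in build(), nothing here enforces single use of the builder, hence the caveat in build() and the unmodifiable wrapping in the constructor rather than in the builder.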
@@ -621,7 +621,7 @@ public Builder addShard(ShardRouting shard) { } public IndexRoutingTable build() { - return new IndexRoutingTable(index, shards.build()); + return new IndexRoutingTable(index, shards); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index d4e3c6db80b4b..b10c3d00f4c31 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.routing; -import com.carrotsearch.hppc.ObjectIntHashMap; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.opensearch.core.Assertions; @@ -59,10 +58,13 @@ import java.util.ListIterator; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Objects; import java.util.Queue; import java.util.Set; import java.util.function.Function; import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * {@link RoutingNodes} represents a copy the routing information contained in the {@link ClusterState cluster state}. @@ -95,7 +97,7 @@ public class RoutingNodes implements Iterable { private int relocatingShards = 0; - private final Map> nodesPerAttributeNames = new HashMap<>(); + private final Map> nodesPerAttributeNames; private final Map recoveriesPerNode = new HashMap<>(); private final Map initialReplicaRecoveries = new HashMap<>(); private final Map initialPrimaryRecoveries = new HashMap<>(); @@ -107,6 +109,7 @@ public RoutingNodes(ClusterState clusterState) { public RoutingNodes(ClusterState clusterState, boolean readOnly) { this.readOnly = readOnly; final RoutingTable routingTable = clusterState.routingTable(); + this.nodesPerAttributeNames = Collections.synchronizedMap(new HashMap<>()); // fill in the nodeToShards with the "live" nodes for (final DiscoveryNode cursor : clusterState.nodes().getDataNodes().values()) { @@ -284,18 +287,15 @@ public RoutingNode node(String nodeId) { return nodesToShards.get(nodeId); } - public ObjectIntHashMap nodesPerAttributesCounts(String attributeName) { - ObjectIntHashMap nodesPerAttributesCounts = nodesPerAttributeNames.get(attributeName); - if (nodesPerAttributesCounts != null) { - return nodesPerAttributesCounts; - } - nodesPerAttributesCounts = new ObjectIntHashMap<>(); - for (RoutingNode routingNode : this) { - String attrValue = routingNode.node().getAttributes().get(attributeName); - nodesPerAttributesCounts.addTo(attrValue, 1); - } - nodesPerAttributeNames.put(attributeName, nodesPerAttributesCounts); - return nodesPerAttributesCounts; + public Stream stream() { + return nodesToShards.values().stream(); + } + + public Set nodesPerAttributesCounts(String attributeName) { + return nodesPerAttributeNames.computeIfAbsent( + attributeName, + ignored -> stream().map(r -> r.node().getAttributes().get(attributeName)).filter(Objects::nonNull).collect(Collectors.toSet()) + ); } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index 23631f438b418..1bee5d8176a0f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.routing; -import com.carrotsearch.hppc.IntSet; import 
org.opensearch.cluster.Diff; import org.opensearch.cluster.Diffable; import org.opensearch.cluster.DiffableUtils; @@ -56,6 +55,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.Predicate; import static org.opensearch.cluster.metadata.MetadataIndexStateService.isIndexVerifiedBeforeClosed; @@ -578,7 +578,11 @@ public Builder addAsRestore(IndexMetadata indexMetadata, SnapshotRecoverySource return this; } - public Builder addAsNewRestore(IndexMetadata indexMetadata, SnapshotRecoverySource recoverySource, IntSet ignoreShards) { + public Builder addAsNewRestore( + IndexMetadata indexMetadata, + SnapshotRecoverySource recoverySource, + final Set ignoreShards + ) { IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()).initializeAsNewRestore( indexMetadata, recoverySource, diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java index 9992930a1a7f6..419d2343f65cd 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java @@ -8,8 +8,6 @@ package org.opensearch.cluster.routing; -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -212,10 +210,10 @@ public void verifyAwarenessAttribute(String attributeName) { private void ensureWeightsSetForAllDiscoveredAndForcedAwarenessValues(ClusterState state, ClusterPutWeightedRoutingRequest request) { String attributeName = request.getWeightedRouting().attributeName(); // build attr_value -> nodes map - ObjectIntHashMap nodesPerAttribute = state.getRoutingNodes().nodesPerAttributesCounts(attributeName); + final Set nodesPerAttribute = state.getRoutingNodes().nodesPerAttributesCounts(attributeName); Set discoveredAwarenessValues = new HashSet<>(); - for (ObjectCursor stringObjectCursor : nodesPerAttribute.keys()) { - if (stringObjectCursor.value != null) discoveredAwarenessValues.add(stringObjectCursor.value); + for (String stringObjectCursor : nodesPerAttribute) { + if (stringObjectCursor != null) discoveredAwarenessValues.add(stringObjectCursor); } Set allAwarenessValues; if (forcedAwarenessAttributes.get(attributeName) == null) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java index 2351c5ec557fc..1fadd775ab7b5 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java @@ -8,7 +8,6 @@ package org.opensearch.cluster.routing.allocation.allocator; -import com.carrotsearch.hppc.ObjectIntHashMap; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; @@ -25,7 +24,6 @@ import java.util.ArrayDeque; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -211,8 +209,8 @@ void balance() { return; } - ObjectIntHashMap 
nodePrimaryShardCount = calculateNodePrimaryShardCount(remoteRoutingNodes); - int totalPrimaryShardCount = Arrays.stream(nodePrimaryShardCount.values).sum(); + final Map nodePrimaryShardCount = calculateNodePrimaryShardCount(remoteRoutingNodes); + int totalPrimaryShardCount = nodePrimaryShardCount.values().stream().reduce(0, Integer::sum); totalPrimaryShardCount += routingNodes.unassigned().getNumPrimaries(); int avgPrimaryPerNode = (totalPrimaryShardCount + routingNodes.size() - 1) / routingNodes.size(); @@ -238,8 +236,8 @@ void balance() { * @param remoteRoutingNodes routing nodes for which the aggregation needs to be performed * @return map of node id to primary shard count */ - private ObjectIntHashMap calculateNodePrimaryShardCount(List remoteRoutingNodes) { - ObjectIntHashMap primaryShardCount = new ObjectIntHashMap<>(); + private Map calculateNodePrimaryShardCount(List remoteRoutingNodes) { + final Map primaryShardCount = new HashMap<>(); for (RoutingNode node : remoteRoutingNodes) { int totalPrimaryShardsPerNode = 0; for (ShardRouting shard : node) { @@ -464,7 +462,7 @@ private void tryRebalanceNode( RoutingNode sourceNode, ArrayDeque targetNodes, int avgPrimary, - ObjectIntHashMap primaryCount + final Map primaryCount ) { long shardsToBalance = primaryCount.get(sourceNode.nodeId()) - avgPrimary; assert shardsToBalance >= 0 : "Shards to balance should be greater than 0, but found negative"; @@ -493,7 +491,7 @@ private void tryRebalanceNode( if (rebalanceDecision.type() == Decision.Type.YES) { shardsToBalance--; - primaryCount.addTo(targetNode.nodeId(), 1); + primaryCount.merge(targetNode.nodeId(), 1, Integer::sum); targetNodes.offer(targetNode); break; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 811dd1249e37d..f0b79194af438 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -39,8 +39,6 @@ import java.util.Set; import java.util.function.Function; -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; @@ -178,16 +176,16 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout } // build attr_value -> nodes map - ObjectIntHashMap nodesPerAttribute = allocation.routingNodes().nodesPerAttributesCounts(awarenessAttribute); + Set nodesPerAttribute = allocation.routingNodes().nodesPerAttributesCounts(awarenessAttribute); // build the count of shards per attribute value - ObjectIntHashMap shardPerAttribute = new ObjectIntHashMap<>(); + Map shardPerAttribute = new HashMap<>(); for (ShardRouting assignedShard : allocation.routingNodes().assignedShards(shardRouting.shardId())) { if (assignedShard.started() || assignedShard.initializing()) { // Note: this also counts relocation targets as that will be the new location of the shard. 
// Relocation sources should not be counted as the shard is moving away RoutingNode routingNode = allocation.routingNodes().node(assignedShard.currentNodeId()); - shardPerAttribute.addTo(routingNode.node().getAttributes().get(awarenessAttribute), 1); + shardPerAttribute.merge(routingNode.node().getAttributes().get(awarenessAttribute), 1, Integer::sum); } } @@ -196,15 +194,14 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId(); if (node.nodeId().equals(nodeId) == false) { // we work on different nodes, move counts around - shardPerAttribute.putOrAdd( + shardPerAttribute.compute( allocation.routingNodes().node(nodeId).node().getAttributes().get(awarenessAttribute), - 0, - -1 + (k, v) -> (v == null) ? 0 : v - 1 ); - shardPerAttribute.addTo(node.node().getAttributes().get(awarenessAttribute), 1); + shardPerAttribute.merge(node.node().getAttributes().get(awarenessAttribute), 1, Integer::sum); } } else { - shardPerAttribute.addTo(node.node().getAttributes().get(awarenessAttribute), 1); + shardPerAttribute.merge(node.node().getAttributes().get(awarenessAttribute), 1, Integer::sum); } } @@ -214,8 +211,8 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout if (fullValues != null) { // If forced awareness is enabled, numberOfAttributes = count(distinct((union(discovered_attributes, forced_attributes))) Set attributesSet = new HashSet<>(fullValues); - for (ObjectCursor stringObjectCursor : nodesPerAttribute.keys()) { - attributesSet.add(stringObjectCursor.value); + for (String stringObjectCursor : nodesPerAttribute) { + attributesSet.add(stringObjectCursor); } numberOfAttributes = attributesSet.size(); } diff --git a/server/src/main/java/org/opensearch/common/FieldMemoryStats.java b/server/src/main/java/org/opensearch/common/FieldMemoryStats.java index 08e84355ab818..1f8a6aba0c883 100644 --- a/server/src/main/java/org/opensearch/common/FieldMemoryStats.java +++ b/server/src/main/java/org/opensearch/common/FieldMemoryStats.java @@ -32,8 +32,6 @@ package org.opensearch.common; -import com.carrotsearch.hppc.ObjectLongHashMap; -import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -41,7 +39,9 @@ import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; +import java.util.HashMap; import java.util.Iterator; +import java.util.Map; import java.util.Objects; /** @@ -49,45 +49,37 @@ * * @opensearch.internal */ -public final class FieldMemoryStats implements Writeable, Iterable> { +public final class FieldMemoryStats implements Writeable, Iterable> { - private final ObjectLongHashMap stats; + private final Map stats; /** * Creates a new FieldMemoryStats instance */ - public FieldMemoryStats(ObjectLongHashMap stats) { + public FieldMemoryStats(Map stats) { this.stats = Objects.requireNonNull(stats, "status must be non-null"); - assert !stats.containsKey(null); + assert stats.containsKey(null) == false; } /** * Creates a new FieldMemoryStats instance from a stream */ public FieldMemoryStats(StreamInput input) throws IOException { - int size = input.readVInt(); - stats = new ObjectLongHashMap<>(size); - for (int i = 0; i < size; i++) { - stats.put(input.readString(), input.readVLong()); - } + stats = input.readMap(StreamInput::readString, 
StreamInput::readVLong); } /** * Adds / merges the given field memory stats into this stats instance */ public void add(FieldMemoryStats fieldMemoryStats) { - for (ObjectLongCursor entry : fieldMemoryStats.stats) { - stats.addTo(entry.key, entry.value); + for (final var entry : fieldMemoryStats.stats.entrySet()) { + stats.merge(entry.getKey(), entry.getValue(), Long::sum); } } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(stats.size()); - for (ObjectLongCursor entry : stats) { - out.writeString(entry.key); - out.writeVLong(entry.value); - } + out.writeMap(stats, StreamOutput::writeString, StreamOutput::writeVLong); } /** @@ -99,9 +91,9 @@ public void writeTo(StreamOutput out) throws IOException { */ public void toXContent(XContentBuilder builder, String key, String rawKey, String readableKey) throws IOException { builder.startObject(key); - for (ObjectLongCursor entry : stats) { - builder.startObject(entry.key); - builder.humanReadableField(rawKey, readableKey, new ByteSizeValue(entry.value)); + for (final var entry : stats.entrySet()) { + builder.startObject(entry.getKey()); + builder.humanReadableField(rawKey, readableKey, new ByteSizeValue(entry.getValue())); builder.endObject(); } builder.endObject(); @@ -111,7 +103,7 @@ public void toXContent(XContentBuilder builder, String key, String rawKey, Strin * Creates a deep copy of this stats instance */ public FieldMemoryStats copy() { - return new FieldMemoryStats(stats.clone()); + return new FieldMemoryStats(new HashMap<>(stats)); } @Override @@ -128,15 +120,15 @@ public int hashCode() { } @Override - public Iterator> iterator() { - return stats.iterator(); + public Iterator> iterator() { + return stats.entrySet().iterator(); } /** * Returns the fields value in bytes or 0 if it's not present in the stats */ public long get(String field) { - return stats.get(field); + return stats.getOrDefault(field, 0L); } /** diff --git a/server/src/main/java/org/opensearch/common/collect/HppcMaps.java b/server/src/main/java/org/opensearch/common/collect/HppcMaps.java deleted file mode 100644 index 2fd7316accf6e..0000000000000 --- a/server/src/main/java/org/opensearch/common/collect/HppcMaps.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.collect; - -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.ObjectLookupContainer; -import com.carrotsearch.hppc.ObjectObjectHashMap; -import com.carrotsearch.hppc.cursors.ObjectCursor; - -import java.util.Iterator; - -/** - * High performance maps - * - * @opensearch.internal - */ -public final class HppcMaps { - - private HppcMaps() {} - - /** - * Returns a new map with the given number of expected elements. - * - * @param expectedElements - * The expected number of elements guaranteed not to cause buffer - * expansion (inclusive). - */ - public static ObjectObjectHashMap newMap(int expectedElements) { - return new ObjectObjectHashMap<>(expectedElements); - } - - /** - * Returns a new map with a default initial capacity. - */ - public static ObjectObjectHashMap newMap() { - return newMap(16); - } - - /** - * Returns a map like {@link #newMap()} that does not accept null keys - */ - public static ObjectObjectHashMap newNoNullKeysMap() { - return ensureNoNullKeys(16); - } - - /** - * Returns a map like {@link #newMap(int)} that does not accept null keys - * - * @param expectedElements - * The expected number of elements guaranteed not to cause buffer - * expansion (inclusive). - */ - public static ObjectObjectHashMap newNoNullKeysMap(int expectedElements) { - return ensureNoNullKeys(expectedElements); - } - - /** - * Wraps the given map and prevent adding of null keys. - * - * @param expectedElements - * The expected number of elements guaranteed not to cause buffer - * expansion (inclusive). - */ - public static ObjectObjectHashMap ensureNoNullKeys(int expectedElements) { - return new ObjectObjectHashMap(expectedElements) { - @Override - public V put(K key, V value) { - if (key == null) { - throw new IllegalArgumentException("Map key must not be null"); - } - return super.put(key, value); - } - }; - } - - /** - * @return an intersection view over the two specified containers (which can be KeyContainer or ObjectHashSet). - */ - // Hppc has forEach, but this means we need to build an intermediate set, with this method we just iterate - // over each unique value without creating a third set. 
- public static Iterable intersection(ObjectLookupContainer container1, final ObjectLookupContainer container2) { - assert container1 != null && container2 != null; - final Iterator> iterator = container1.iterator(); - final Iterator intersection = new Iterator() { - - T current; - - @Override - public boolean hasNext() { - if (iterator.hasNext()) { - do { - T next = iterator.next().value; - if (container2.contains(next)) { - current = next; - return true; - } - } while (iterator.hasNext()); - } - return false; - } - - @Override - public T next() { - return current; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; - return new Iterable() { - @Override - public Iterator iterator() { - return intersection; - } - }; - } - - /** - * Object for the map - * - * @opensearch.internal - */ - public static final class Object { - /** - * Integer type for the map - * - * @opensearch.internal - */ - public static final class Integer { - public static ObjectIntHashMap ensureNoNullKeys(int capacity, float loadFactor) { - return new ObjectIntHashMap(capacity, loadFactor) { - @Override - public int put(V key, int value) { - if (key == null) { - throw new IllegalArgumentException("Map key must not be null"); - } - return super.put(key, value); - } - }; - } - } - } -} diff --git a/server/src/main/java/org/opensearch/common/collect/ImmutableOpenIntMap.java b/server/src/main/java/org/opensearch/common/collect/ImmutableOpenIntMap.java deleted file mode 100644 index 0bff76e7ec90e..0000000000000 --- a/server/src/main/java/org/opensearch/common/collect/ImmutableOpenIntMap.java +++ /dev/null @@ -1,410 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.collect; - -import com.carrotsearch.hppc.IntCollection; -import com.carrotsearch.hppc.IntContainer; -import com.carrotsearch.hppc.IntLookupContainer; -import com.carrotsearch.hppc.IntObjectAssociativeContainer; -import com.carrotsearch.hppc.IntObjectHashMap; -import com.carrotsearch.hppc.IntObjectMap; -import com.carrotsearch.hppc.ObjectContainer; -import com.carrotsearch.hppc.cursors.IntCursor; -import com.carrotsearch.hppc.cursors.IntObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.predicates.IntObjectPredicate; -import com.carrotsearch.hppc.predicates.IntPredicate; -import com.carrotsearch.hppc.procedures.IntObjectProcedure; - -import java.util.Iterator; -import java.util.Map; - -/** - * An immutable map implementation based on open hash map. - *
<p>
- * Can be constructed using a {@link #builder()}, or using {@link #builder(org.opensearch.common.collect.ImmutableOpenIntMap)} - * (which is an optimized option to copy over existing content and modify it). - * - * @opensearch.internal - */ -public final class ImmutableOpenIntMap implements Iterable> { - - private final IntObjectHashMap map; - - private ImmutableOpenIntMap(IntObjectHashMap map) { - this.map = map; - } - - /** - * @return Returns the value associated with the given key or the default value - * for the key type, if the key is not associated with any value. - *
<p>
- * Important note: For primitive type values, the value returned for a non-existing - * key may not be the default value of the primitive type (it may be any value previously - * assigned to that slot). - */ - public VType get(int key) { - return map.get(key); - } - - /** - * Returns true if this container has an association to a value for - * the given key. - */ - public boolean containsKey(int key) { - return map.containsKey(key); - } - - /** - * @return Returns the current size (number of assigned keys) in the container. - */ - public int size() { - return map.size(); - } - - /** - * @return Return true if this hash map contains no assigned keys. - */ - public boolean isEmpty() { - return map.isEmpty(); - } - - /** - * Returns a cursor over the entries (key-value pairs) in this map. The iterator is - * implemented as a cursor and it returns the same cursor instance on every - * call to {@link java.util.Iterator#next()}. To read the current key and value use the cursor's - * public fields. An example is shown below. - *
-     * <pre>
-     * for (IntShortCursor c : intShortMap)
-     * {
-     *     System.out.println("index=" + c.index
-     *       + " key=" + c.key
-     *       + " value=" + c.value);
-     * }
-     * </pre>
-     * <p>
- * The index field inside the cursor gives the internal index inside - * the container's implementation. The interpretation of this index depends on - * to the container. - */ - @Override - public Iterator> iterator() { - return map.iterator(); - } - - /** - * Returns a specialized view of the keys of this associated container. - * The view additionally implements {@link com.carrotsearch.hppc.ObjectLookupContainer}. - */ - public IntLookupContainer keys() { - return map.keys(); - } - - /** - * Returns a direct iterator over the keys. - */ - public Iterator keysIt() { - final Iterator iterator = map.keys().iterator(); - return new Iterator() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public Integer next() { - return iterator.next().value; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - - /** - * @return Returns a container with all values stored in this map. - */ - public ObjectContainer values() { - return map.values(); - } - - /** - * Returns a direct iterator over the keys. - */ - public Iterator valuesIt() { - final Iterator> iterator = map.values().iterator(); - return new Iterator() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public VType next() { - return iterator.next().value; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - - @Override - public String toString() { - return map.toString(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - ImmutableOpenIntMap that = (ImmutableOpenIntMap) o; - - if (!map.equals(that.map)) return false; - - return true; - } - - @Override - public int hashCode() { - return map.hashCode(); - } - - @SuppressWarnings("unchecked") - private static final ImmutableOpenIntMap EMPTY = new ImmutableOpenIntMap(new IntObjectHashMap()); - - @SuppressWarnings("unchecked") - public static ImmutableOpenIntMap of() { - return EMPTY; - } - - public static Builder builder() { - return new Builder<>(); - } - - public static Builder builder(int size) { - return new Builder<>(size); - } - - public static Builder builder(ImmutableOpenIntMap map) { - return new Builder<>(map); - } - - /** - * Base builder for an immutable int - * - * @opensearch.internal - */ - public static class Builder implements IntObjectMap { - - private IntObjectHashMap map; - - public Builder() { - // noinspection unchecked - this(EMPTY); - } - - public Builder(int size) { - this.map = new IntObjectHashMap<>(size); - } - - public Builder(ImmutableOpenIntMap map) { - this.map = map.map.clone(); - } - - /** - * Builds a new instance of the - */ - public ImmutableOpenIntMap build() { - IntObjectHashMap map = this.map; - this.map = null; // nullify the map, so any operation post build will fail! (hackish, but safest) - return new ImmutableOpenIntMap<>(map); - } - - /** - * Puts all the entries in the map to the builder. - */ - public Builder putAll(Map map) { - for (Map.Entry entry : map.entrySet()) { - this.map.put(entry.getKey(), entry.getValue()); - } - return this; - } - - /** - * A put operation that can be used in the fluent pattern. 
- */ - public Builder fPut(int key, VType value) { - map.put(key, value); - return this; - } - - @Override - public VType put(int key, VType value) { - return map.put(key, value); - } - - @Override - public VType get(int key) { - return map.get(key); - } - - @Override - public VType getOrDefault(int kType, VType vType) { - return map.getOrDefault(kType, vType); - } - - /** - * Remove that can be used in the fluent pattern. - */ - public Builder fRemove(int key) { - map.remove(key); - return this; - } - - @Override - public VType remove(int key) { - return map.remove(key); - } - - @Override - public Iterator> iterator() { - return map.iterator(); - } - - @Override - public boolean containsKey(int key) { - return map.containsKey(key); - } - - @Override - public int size() { - return map.size(); - } - - @Override - public boolean isEmpty() { - return map.isEmpty(); - } - - @Override - public void clear() { - map.clear(); - } - - @Override - public int putAll(IntObjectAssociativeContainer container) { - return map.putAll(container); - } - - @Override - public int putAll(Iterable> iterable) { - return map.putAll(iterable); - } - - @Override - public int removeAll(IntContainer container) { - return map.removeAll(container); - } - - @Override - public int removeAll(IntPredicate predicate) { - return map.removeAll(predicate); - } - - @Override - public > T forEach(T procedure) { - return map.forEach(procedure); - } - - @Override - public IntCollection keys() { - return map.keys(); - } - - @Override - public ObjectContainer values() { - return map.values(); - } - - @Override - public int removeAll(IntObjectPredicate predicate) { - return map.removeAll(predicate); - } - - @Override - public > T forEach(T predicate) { - return map.forEach(predicate); - } - - @Override - public int indexOf(int key) { - return map.indexOf(key); - } - - @Override - public boolean indexExists(int index) { - return map.indexExists(index); - } - - @Override - public VType indexGet(int index) { - return map.indexGet(index); - } - - @Override - public VType indexReplace(int index, VType newValue) { - return map.indexReplace(index, newValue); - } - - @Override - public void indexInsert(int index, int key, VType value) { - map.indexInsert(index, key, value); - } - - @Override - public void release() { - map.release(); - } - - @Override - public String visualizeKeyDistribution(int characters) { - return map.visualizeKeyDistribution(characters); - } - } -} diff --git a/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java b/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java index a7a595e6a93ff..93ebd82a3e3e0 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java @@ -32,7 +32,6 @@ package org.opensearch.common.lucene.search; -import com.carrotsearch.hppc.ObjectHashSet; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; @@ -50,10 +49,12 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.ListIterator; import java.util.Objects; +import java.util.Set; /** * A multi phrase prefix query. 
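The rewrite hunk that follows applies the usual HPPC-set migration: ObjectHashSet<Term> becomes HashSet<Term>, and HPPC's toArray(Class<T>) becomes the JDK's pass-a-zero-length-array idiom. A self-contained sketch of the two call-site changes (values are hypothetical):

import java.util.HashSet;
import java.util.Set;

final class ToArraySketch {
    public static void main(String[] args) {
        Set<String> terms = new HashSet<>();  // was: new ObjectHashSet<>()
        terms.add("app");
        terms.add("apple");
        // HPPC: terms.toArray(String.class); JDK: pass a zero-length array
        // and let toArray allocate one of the right size.
        String[] expanded = terms.toArray(new String[0]);
        System.out.println(expanded.length);  // prints 2
    }
}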
@@ -174,7 +175,7 @@ public Query rewrite(IndexReader reader) throws IOException { } Term[] suffixTerms = termArrays.get(sizeMinus1); int position = positions.get(sizeMinus1); - ObjectHashSet terms = new ObjectHashSet<>(); + Set terms = new HashSet<>(); for (Term term : suffixTerms) { getPrefixTerms(terms, term, reader); if (terms.size() > maxExpansions) { @@ -196,11 +197,11 @@ public Query rewrite(IndexReader reader) throws IOException { ) .build(); } - query.add(terms.toArray(Term.class), position); + query.add(terms.toArray(new Term[0]), position); return query.build(); } - private void getPrefixTerms(ObjectHashSet terms, final Term prefix, final IndexReader reader) throws IOException { + private void getPrefixTerms(final Set terms, final Term prefix, final IndexReader reader) throws IOException { // SlowCompositeReaderWrapper could be used... but this would merge all terms from each segment into one terms // instance, which is very expensive. Therefore I think it is better to iterate over each leaf individually. List leaves = reader.leaves(); diff --git a/server/src/main/java/org/opensearch/common/lucene/search/function/RandomScoreFunction.java b/server/src/main/java/org/opensearch/common/lucene/search/function/RandomScoreFunction.java index 48b3e0d04bdc3..b743a7bd3e707 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/function/RandomScoreFunction.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/function/RandomScoreFunction.java @@ -31,11 +31,11 @@ package org.opensearch.common.lucene.search.function; -import com.carrotsearch.hppc.BitMixer; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; import org.apache.lucene.util.StringHelper; import org.opensearch.common.Nullable; +import org.opensearch.common.util.BitMixer; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.LeafFieldData; import org.opensearch.index.fielddata.SortedBinaryDocValues; diff --git a/server/src/main/java/org/opensearch/common/recycler/Recyclers.java b/server/src/main/java/org/opensearch/common/recycler/Recyclers.java index 74916cf4e6d10..4cbb80509d6a1 100644 --- a/server/src/main/java/org/opensearch/common/recycler/Recyclers.java +++ b/server/src/main/java/org/opensearch/common/recycler/Recyclers.java @@ -32,7 +32,7 @@ package org.opensearch.common.recycler; -import com.carrotsearch.hppc.BitMixer; +import org.opensearch.common.util.BitMixer; import java.util.ArrayDeque; diff --git a/server/src/main/java/org/opensearch/common/util/AbstractPagedHashMap.java b/server/src/main/java/org/opensearch/common/util/AbstractPagedHashMap.java index 1ff3038297008..450dcc833a7b3 100644 --- a/server/src/main/java/org/opensearch/common/util/AbstractPagedHashMap.java +++ b/server/src/main/java/org/opensearch/common/util/AbstractPagedHashMap.java @@ -32,7 +32,6 @@ package org.opensearch.common.util; -import com.carrotsearch.hppc.BitMixer; import org.opensearch.common.lease.Releasable; /** diff --git a/server/src/main/java/org/opensearch/common/util/BytesRefHash.java b/server/src/main/java/org/opensearch/common/util/BytesRefHash.java index da11d91b1f535..ecc93d017beaf 100644 --- a/server/src/main/java/org/opensearch/common/util/BytesRefHash.java +++ b/server/src/main/java/org/opensearch/common/util/BytesRefHash.java @@ -32,7 +32,6 @@ package org.opensearch.common.util; -import com.carrotsearch.hppc.BitMixer; import org.apache.lucene.util.BytesRef; import org.opensearch.common.lease.Releasable; import 
org.opensearch.common.lease.Releasables; diff --git a/server/src/main/java/org/opensearch/common/util/LongLongHash.java b/server/src/main/java/org/opensearch/common/util/LongLongHash.java index 1a720eae82a1d..f1cdd29932b2f 100644 --- a/server/src/main/java/org/opensearch/common/util/LongLongHash.java +++ b/server/src/main/java/org/opensearch/common/util/LongLongHash.java @@ -32,8 +32,6 @@ package org.opensearch.common.util; -import com.carrotsearch.hppc.BitMixer; - import org.opensearch.common.lease.Releasables; /** diff --git a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java index 5789b47423c1d..417eb6a316d86 100644 --- a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java +++ b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java @@ -8,7 +8,6 @@ package org.opensearch.common.util; -import org.apache.lucene.util.hppc.BitMixer; import org.opensearch.common.lease.Releasable; /** diff --git a/server/src/main/java/org/opensearch/gateway/Gateway.java b/server/src/main/java/org/opensearch/gateway/Gateway.java index 15b226e2d30e2..a01f053ce6e9b 100644 --- a/server/src/main/java/org/opensearch/gateway/Gateway.java +++ b/server/src/main/java/org/opensearch/gateway/Gateway.java @@ -32,7 +32,6 @@ package org.opensearch.gateway; -import com.carrotsearch.hppc.ObjectFloatHashMap; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.FailedNodeException; @@ -44,6 +43,8 @@ import org.opensearch.core.index.Index; import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; import java.util.function.Function; /** @@ -81,7 +82,7 @@ public void performStateRecovery(final GatewayStateRecoveredListener listener) t } } - final ObjectFloatHashMap indices = new ObjectFloatHashMap<>(); + final Map indices = new HashMap<>(); Metadata electedGlobalState = null; int found = 0; for (final TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) { @@ -95,7 +96,7 @@ public void performStateRecovery(final GatewayStateRecoveredListener listener) t electedGlobalState = nodeState.metadata(); } for (final IndexMetadata cursor : nodeState.metadata().indices().values()) { - indices.addTo(cursor.getIndex(), 1); + indices.merge(cursor.getIndex(), 1f, Float::sum); } } if (found < requiredAllocation) { @@ -106,7 +107,7 @@ public void performStateRecovery(final GatewayStateRecoveredListener listener) t final Metadata.Builder metadataBuilder = Metadata.builder(electedGlobalState).removeAllIndices(); assert !indices.containsKey(null); - final Object[] keys = indices.keys; + final Object[] keys = indices.keySet().toArray(); for (int i = 0; i < keys.length; i++) { if (keys[i] != null) { final Index index = (Index) keys[i]; diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index 8c3b5b6ceecbd..46f296f52ae01 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -32,8 +32,6 @@ package org.opensearch.http; -import com.carrotsearch.hppc.IntHashSet; -import com.carrotsearch.hppc.IntSet; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -66,6 +64,7 @@ import 
java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -264,12 +263,12 @@ static int resolvePublishPort(Settings settings, List boundAdd // if no matching boundAddress found, check if there is a unique port for all bound addresses if (publishPort < 0) { - final IntSet ports = new IntHashSet(); + final Set ports = new HashSet<>(); for (TransportAddress boundAddress : boundAddresses) { ports.add(boundAddress.getPort()); } if (ports.size() == 1) { - publishPort = ports.iterator().next().value; + publishPort = ports.iterator().next(); } } diff --git a/server/src/main/java/org/opensearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/opensearch/index/engine/CombinedDeletionPolicy.java index 528bd556681cc..4d7faf6c9e375 100644 --- a/server/src/main/java/org/opensearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/opensearch/index/engine/CombinedDeletionPolicy.java @@ -32,7 +32,6 @@ package org.opensearch.index.engine; -import com.carrotsearch.hppc.ObjectIntHashMap; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexDeletionPolicy; @@ -45,6 +44,7 @@ import java.io.IOException; import java.nio.file.Path; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -64,7 +64,7 @@ public class CombinedDeletionPolicy extends IndexDeletionPolicy { private final TranslogDeletionPolicy translogDeletionPolicy; private final SoftDeletesPolicy softDeletesPolicy; private final LongSupplier globalCheckpointSupplier; - private final ObjectIntHashMap snapshottedCommits; // Number of snapshots held against each commit point. + private final Map snapshottedCommits; // Number of snapshots held against each commit point. private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. private volatile long maxSeqNoOfNextSafeCommit; private volatile IndexCommit lastCommit; // the most recent commit point @@ -80,7 +80,7 @@ public class CombinedDeletionPolicy extends IndexDeletionPolicy { this.translogDeletionPolicy = translogDeletionPolicy; this.softDeletesPolicy = softDeletesPolicy; this.globalCheckpointSupplier = globalCheckpointSupplier; - this.snapshottedCommits = new ObjectIntHashMap<>(); + this.snapshottedCommits = new HashMap<>(); } @Override @@ -174,7 +174,7 @@ synchronized IndexCommit acquireIndexCommit(boolean acquiringSafeCommit) { assert safeCommit != null : "Safe commit is not initialized yet"; assert lastCommit != null : "Last commit is not initialized yet"; final IndexCommit snapshotting = acquiringSafeCommit ? 
safeCommit : lastCommit; - snapshottedCommits.addTo(snapshotting, 1); // increase refCount + snapshottedCommits.merge(snapshotting, 1, Integer::sum); // increase refCount return new SnapshotIndexCommit(snapshotting); } @@ -191,7 +191,7 @@ synchronized boolean releaseCommit(final IndexCommit snapshotCommit) { + "], releasing commit [" + releasingCommit + "]"; - final int refCount = snapshottedCommits.addTo(releasingCommit, -1); // release refCount + final int refCount = snapshottedCommits.merge(releasingCommit, -1, Integer::sum); // release refCount assert refCount >= 0 : "Number of snapshots can not be negative [" + refCount + "]"; if (refCount == 0) { snapshottedCommits.remove(releasingCommit); diff --git a/server/src/main/java/org/opensearch/index/engine/CompletionStatsCache.java b/server/src/main/java/org/opensearch/index/engine/CompletionStatsCache.java index 08ede10f3f6aa..c6cf7e8c1b53f 100644 --- a/server/src/main/java/org/opensearch/index/engine/CompletionStatsCache.java +++ b/server/src/main/java/org/opensearch/index/engine/CompletionStatsCache.java @@ -31,8 +31,6 @@ package org.opensearch.index.engine; -import com.carrotsearch.hppc.ObjectLongHashMap; -import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -47,6 +45,8 @@ import org.opensearch.common.util.CollectionUtils; import org.opensearch.search.suggest.completion.CompletionStats; +import java.util.HashMap; +import java.util.Map; import java.util.function.Supplier; /** @@ -99,7 +99,7 @@ CompletionStats get(String... fieldNamePatterns) { // we won the race, nobody else is already computing stats, so it's up to us ActionListener.completeWith(newFuture, () -> { long sizeInBytes = 0; - final ObjectLongHashMap completionFields = new ObjectLongHashMap<>(); + final Map completionFields = new HashMap<>(); try (Engine.Searcher currentSearcher = searcherSupplier.get()) { for (LeafReaderContext atomicReaderContext : currentSearcher.getIndexReader().leaves()) { @@ -109,7 +109,7 @@ CompletionStats get(String... fieldNamePatterns) { if (terms instanceof CompletionTerms) { // TODO: currently we load up the suggester for reporting its size final long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed(); - completionFields.addTo(info.name, fstSize); + completionFields.merge(info.name, fstSize, Long::sum); sizeInBytes += fstSize; } } @@ -143,10 +143,10 @@ CompletionStats get(String... 
fieldNamePatterns) { private static CompletionStats filterCompletionStatsByFieldName(String[] fieldNamePatterns, CompletionStats fullCompletionStats) { final FieldMemoryStats fieldMemoryStats; if (CollectionUtils.isEmpty(fieldNamePatterns) == false) { - final ObjectLongHashMap completionFields = new ObjectLongHashMap<>(fieldNamePatterns.length); - for (ObjectLongCursor fieldCursor : fullCompletionStats.getFields()) { - if (Regex.simpleMatch(fieldNamePatterns, fieldCursor.key)) { - completionFields.addTo(fieldCursor.key, fieldCursor.value); + final Map completionFields = new HashMap<>(fieldNamePatterns.length); + for (var fieldCursor : fullCompletionStats.getFields()) { + if (Regex.simpleMatch(fieldNamePatterns, fieldCursor.getKey())) { + completionFields.merge(fieldCursor.getKey(), fieldCursor.getValue(), Long::sum); } } fieldMemoryStats = new FieldMemoryStats(completionFields); diff --git a/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java index f03896c4b670a..138b417571784 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java @@ -32,7 +32,6 @@ package org.opensearch.index.fielddata; -import com.carrotsearch.hppc.ObjectLongHashMap; import org.apache.lucene.util.Accountable; import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.metrics.CounterMetric; @@ -41,6 +40,7 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.index.shard.ShardId; +import java.util.HashMap; import java.util.Map; import java.util.concurrent.ConcurrentMap; @@ -56,9 +56,9 @@ public class ShardFieldData implements IndexFieldDataCache.Listener { private final ConcurrentMap perFieldTotals = ConcurrentCollections.newConcurrentMap(); public FieldDataStats stats(String... 
fields) { - ObjectLongHashMap fieldTotals = null; + Map fieldTotals = null; if (CollectionUtils.isEmpty(fields) == false) { - fieldTotals = new ObjectLongHashMap<>(); + fieldTotals = new HashMap<>(); for (Map.Entry entry : perFieldTotals.entrySet()) { if (Regex.simpleMatch(fields, entry.getKey())) { fieldTotals.put(entry.getKey(), entry.getValue().count()); diff --git a/server/src/main/java/org/opensearch/index/mapper/ParseContext.java b/server/src/main/java/org/opensearch/index/mapper/ParseContext.java index 707072ec30b44..fdb08a6816cd3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ParseContext.java +++ b/server/src/main/java/org/opensearch/index/mapper/ParseContext.java @@ -32,8 +32,6 @@ package org.opensearch.index.mapper; -import com.carrotsearch.hppc.ObjectObjectHashMap; -import com.carrotsearch.hppc.ObjectObjectMap; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; @@ -44,10 +42,12 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Set; /** @@ -68,7 +68,7 @@ public static class Document implements Iterable { private final String path; private final String prefix; private final List fields; - private ObjectObjectMap keyedFields; + private Map keyedFields; private Document(String path, Document parent) { fields = new ArrayList<>(); @@ -124,7 +124,7 @@ public void add(IndexableField field) { /** Add fields so that they can later be fetched using {@link #getByKey(Object)}. */ public void addWithKey(Object key, IndexableField field) { if (keyedFields == null) { - keyedFields = new ObjectObjectHashMap<>(); + keyedFields = new HashMap<>(); } else if (keyedFields.containsKey(key)) { throw new IllegalStateException("Only one field can be stored per key"); } diff --git a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java index 74ac32349af94..5069e1f7d6ccd 100644 --- a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java @@ -32,9 +32,10 @@ package org.opensearch.index.seqno; -import com.carrotsearch.hppc.LongObjectHashMap; import org.opensearch.common.SuppressForbidden; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.atomic.AtomicLong; /** @@ -55,13 +56,13 @@ public class LocalCheckpointTracker { * A collection of bit sets representing processed sequence numbers. Each sequence number is mapped to a bit set by dividing by the * bit set size. */ - final LongObjectHashMap processedSeqNo = new LongObjectHashMap<>(); + final Map processedSeqNo = new HashMap<>(); /** * A collection of bit sets representing durably persisted sequence numbers. Each sequence number is mapped to a bit set by dividing by * the bit set size. */ - final LongObjectHashMap persistedSeqNo = new LongObjectHashMap<>(); + final Map persistedSeqNo = new HashMap<>(); /** * The current local checkpoint, i.e., all sequence numbers no more than this number have been processed. 
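The hunks above and below show the two map idioms this refactor leans on: ObjectIntHashMap.addTo(key, delta) becomes Map.merge(key, delta, Integer::sum), and HPPC's four-step indexOf / indexExists / indexGet / indexInsert sequence collapses into a single computeIfAbsent. A sketch of both, assuming java.util.BitSet stands in for CountedBitSet and the class name is invented:

import java.util.BitSet;
import java.util.HashMap;
import java.util.Map;

final class JdkMapIdioms {
    static final int BIT_SET_SIZE = 1024;

    final Map<String, Integer> refCounts = new HashMap<>();
    final Map<Long, BitSet> bitSetMap = new HashMap<>();

    // was: refCounts.addTo(key, 1) on an ObjectIntHashMap
    int acquire(String key) {
        return refCounts.merge(key, 1, Integer::sum);
    }

    // was: addTo(key, -1), then remove the entry once the count hits zero
    int release(String key) {
        final int refCount = refCounts.merge(key, -1, Integer::sum);
        if (refCount == 0) {
            refCounts.remove(key);
        }
        return refCount;
    }

    // was: indexOf / indexExists / indexGet / indexInsert on a LongObjectHashMap;
    // one lookup, one allocation on miss.
    BitSet getBitSetForSeqNo(long seqNo) {
        final long bitSetKey = seqNo / BIT_SET_SIZE;
        return bitSetMap.computeIfAbsent(bitSetKey, k -> new BitSet(BIT_SET_SIZE));
    }
}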
@@ -154,7 +155,7 @@ public synchronized void fastForwardProcessedSeqNo(final long seqNo) { processedCheckpoint.compareAndSet(currentProcessedCheckpoint, seqNo); } - private void markSeqNo(final long seqNo, final AtomicLong checkPoint, final LongObjectHashMap bitSetMap) { + private void markSeqNo(final long seqNo, final AtomicLong checkPoint, final Map bitSetMap) { assert Thread.holdsLock(this); // make sure we track highest seen sequence number advanceMaxSeqNo(seqNo); @@ -248,7 +249,7 @@ public boolean hasProcessed(final long seqNo) { * following the current checkpoint is processed. */ @SuppressForbidden(reason = "Object#notifyAll") - private void updateCheckpoint(AtomicLong checkPoint, LongObjectHashMap bitSetMap) { + private void updateCheckpoint(AtomicLong checkPoint, final Map bitSetMap) { assert Thread.holdsLock(this); assert getBitSetForSeqNo(bitSetMap, checkPoint.get() + 1).get(seqNoToBitSetOffset(checkPoint.get() + 1)) : "updateCheckpoint is called but the bit following the checkpoint is not set"; @@ -294,23 +295,15 @@ private static long getBitSetKey(final long seqNo) { return seqNo / BIT_SET_SIZE; } - private CountedBitSet getBitSetForSeqNo(final LongObjectHashMap bitSetMap, final long seqNo) { + private CountedBitSet getBitSetForSeqNo(final Map bitSetMap, final long seqNo) { assert Thread.holdsLock(this); final long bitSetKey = getBitSetKey(seqNo); - final int index = bitSetMap.indexOf(bitSetKey); - final CountedBitSet bitSet; - if (bitSetMap.indexExists(index)) { - bitSet = bitSetMap.indexGet(index); - } else { - bitSet = new CountedBitSet(BIT_SET_SIZE); - bitSetMap.indexInsert(index, bitSetKey, bitSet); - } - return bitSet; + return bitSetMap.computeIfAbsent(bitSetKey, k -> new CountedBitSet(BIT_SET_SIZE)); } /** * Obtain the position in the bit set corresponding to the provided sequence number. The bit set corresponding to the sequence number - * can be obtained via {@link #getBitSetForSeqNo(LongObjectHashMap, long)}. + * can be obtained via {@link #getBitSetForSeqNo(Map, long)}. 
* * @param seqNo the sequence number to obtain the position for * @return the position in the bit set corresponding to the provided sequence number diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 1dcd1f3153d5e..fcb58144c4782 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -32,8 +32,6 @@ package org.opensearch.index.seqno; -import com.carrotsearch.hppc.ObjectLongHashMap; -import com.carrotsearch.hppc.ObjectLongMap; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.LegacyESVersion; import org.opensearch.Version; @@ -816,10 +814,10 @@ public int hashCode() { * * @return a map from allocation ID to the local knowledge of the persisted global checkpoint for that allocation ID */ - public synchronized ObjectLongMap getInSyncGlobalCheckpoints() { + public synchronized Map getInSyncGlobalCheckpoints() { assert primaryMode; assert handoffInProgress == false; - final ObjectLongMap globalCheckpoints = new ObjectLongHashMap<>(checkpoints.size()); // upper bound on the size + final Map globalCheckpoints = new HashMap<>(checkpoints.size()); // upper bound on the size checkpoints.entrySet() .stream() .filter(e -> e.getValue().inSync && e.getValue().replicated) diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 7b5306f336284..d0885301c572a 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -32,7 +32,6 @@ package org.opensearch.index.shard; -import com.carrotsearch.hppc.ObjectLongMap; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.codecs.CodecUtil; @@ -3183,7 +3182,7 @@ public long getLastSyncedGlobalCheckpoint() { * * @return a map from allocation ID to the local knowledge of the global checkpoint for that allocation ID */ - public ObjectLongMap getInSyncGlobalCheckpoints() { + public Map getInSyncGlobalCheckpoints() { assert assertPrimaryMode(); verifyNotClosed(); return replicationTracker.getInSyncGlobalCheckpoints(); @@ -3204,7 +3203,7 @@ public void maybeSyncGlobalCheckpoint(final String reason) { final SeqNoStats stats = getEngine().getSeqNoStats(replicationTracker.getGlobalCheckpoint()); final boolean asyncDurability = indexSettings().getTranslogDurability() == Durability.ASYNC; if (stats.getMaxSeqNo() == stats.getGlobalCheckpoint() || asyncDurability) { - final ObjectLongMap globalCheckpoints = getInSyncGlobalCheckpoints(); + final Map globalCheckpoints = getInSyncGlobalCheckpoints(); final long globalCheckpoint = replicationTracker.getGlobalCheckpoint(); // async durability means that the local checkpoint might lag (as it is only advanced on fsync) // periodically ask for the newest local checkpoint by syncing the global checkpoint, so that ultimately the global @@ -3214,7 +3213,7 @@ public void maybeSyncGlobalCheckpoint(final String reason) { final boolean syncNeeded = (asyncDurability && (stats.getGlobalCheckpoint() < stats.getMaxSeqNo() || replicationTracker.pendingInSync())) // check if the persisted global checkpoint - || StreamSupport.stream(globalCheckpoints.values().spliterator(), false).anyMatch(v -> v.value < globalCheckpoint); + || 
StreamSupport.stream(globalCheckpoints.values().spliterator(), false).anyMatch(v -> v < globalCheckpoint); // only sync if index is not closed and there is a shard lagging the primary if (syncNeeded && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN) { logger.trace("syncing global checkpoint for [{}]", reason); diff --git a/server/src/main/java/org/opensearch/index/translog/MultiSnapshot.java b/server/src/main/java/org/opensearch/index/translog/MultiSnapshot.java index bda8bbecb81f0..941283afe5908 100644 --- a/server/src/main/java/org/opensearch/index/translog/MultiSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/MultiSnapshot.java @@ -32,13 +32,14 @@ package org.opensearch.index.translog; -import com.carrotsearch.hppc.LongObjectHashMap; import org.opensearch.index.seqno.CountedBitSet; import org.opensearch.index.seqno.SequenceNumbers; import java.io.Closeable; import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; /** * A snapshot composed out of multiple snapshots @@ -105,7 +106,7 @@ public void close() throws IOException { */ static final class SeqNoSet { static final short BIT_SET_SIZE = 1024; - private final LongObjectHashMap bitSets = new LongObjectHashMap<>(); + private final Map bitSets = new HashMap<>(); /** * Marks this sequence number and returns {@code true} if it is seen before. diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java index 1fd3a9c3341f6..dd85f8f1f77ea 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java @@ -32,8 +32,6 @@ package org.opensearch.index.translog; -import com.carrotsearch.hppc.LongArrayList; -import com.carrotsearch.hppc.procedures.LongProcedure; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; @@ -62,7 +60,9 @@ import java.nio.channels.FileChannel; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; @@ -105,7 +105,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { // lock order synchronized(syncLock) -> try(Releasable lock = writeLock.acquire()) -> synchronized(this) private final Object syncLock = new Object(); - private LongArrayList nonFsyncedSequenceNumbers = new LongArrayList(64); + private List nonFsyncedSequenceNumbers = new ArrayList<>(64); private final int forceWriteThreshold; private volatile long bufferedBytes; private ReleasableBytesStreamOutput buffer; @@ -511,7 +511,7 @@ final boolean syncUpTo(long offset) throws IOException { // double checked locking - we don't want to fsync unless we have to and now that we have // the lock we should check again since if this code is busy we might have fsynced enough already final Checkpoint checkpointToSync; - final LongArrayList flushedSequenceNumbers; + final List flushedSequenceNumbers; final ReleasableBytesReference toWrite; try (ReleasableLock toClose = writeLock.acquire()) { synchronized (this) { @@ -519,7 +519,7 @@ final boolean syncUpTo(long offset) throws IOException { checkpointToSync = getCheckpoint(); toWrite = pollOpsToWrite(); flushedSequenceNumbers = 
nonFsyncedSequenceNumbers; - nonFsyncedSequenceNumbers = new LongArrayList(64); + nonFsyncedSequenceNumbers = new ArrayList<>(64); } try { @@ -541,7 +541,7 @@ final boolean syncUpTo(long offset) throws IOException { closeWithTragicEvent(ex); throw ex; } - flushedSequenceNumbers.forEach((LongProcedure) persistedSequenceNumberConsumer::accept); + flushedSequenceNumbers.forEach(persistedSequenceNumberConsumer::accept); assert lastSyncedCheckpoint.offset <= checkpointToSync.offset : "illegal state: " + lastSyncedCheckpoint.offset + " <= " diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index a0208fab9cbef..c06880db42587 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -32,8 +32,6 @@ package org.opensearch.indices; -import com.carrotsearch.hppc.ObjectHashSet; -import com.carrotsearch.hppc.ObjectSet; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DirectoryReader; @@ -59,6 +57,7 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.Iterator; import java.util.Objects; import java.util.Set; @@ -330,8 +329,8 @@ public int hashCode() { } synchronized void cleanCache() { - final ObjectSet currentKeysToClean = new ObjectHashSet<>(); - final ObjectSet currentFullClean = new ObjectHashSet<>(); + final Set currentKeysToClean = new HashSet<>(); + final Set currentFullClean = new HashSet<>(); currentKeysToClean.clear(); currentFullClean.clear(); for (Iterator iterator = keysToClean.iterator(); iterator.hasNext();) { diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java index 6a2be1efc0b86..b879ea37738e1 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java @@ -32,7 +32,6 @@ package org.opensearch.rest.action.cat; -import com.carrotsearch.hppc.ObjectIntScatterMap; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -51,7 +50,9 @@ import org.opensearch.rest.action.RestActionListener; import org.opensearch.rest.action.RestResponseListener; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; @@ -131,7 +132,7 @@ protected Table getTableWithHeader(final RestRequest request) { } private Table buildTable(RestRequest request, final ClusterStateResponse state, final NodesStatsResponse stats) { - final ObjectIntScatterMap allocs = new ObjectIntScatterMap<>(); + final Map allocs = new HashMap<>(); for (ShardRouting shard : state.getState().routingTable().allShards()) { String nodeId = "UNASSIGNED"; @@ -140,7 +141,7 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, nodeId = shard.currentNodeId(); } - allocs.addTo(nodeId, 1); + allocs.merge(nodeId, 1, Integer::sum); } Table table = getTableWithHeader(request); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestFielddataAction.java 
b/server/src/main/java/org/opensearch/rest/action/cat/RestFielddataAction.java index df70ee5943aad..a04bac0c30bc9 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestFielddataAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestFielddataAction.java @@ -32,7 +32,6 @@ package org.opensearch.rest.action.cat; -import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -107,14 +106,14 @@ private Table buildTable(final RestRequest request, final NodesStatsResponse nod for (NodeStats nodeStats : nodeStatses.getNodes()) { if (nodeStats.getIndices().getFieldData().getFields() != null) { - for (ObjectLongCursor cursor : nodeStats.getIndices().getFieldData().getFields()) { + for (var cursor : nodeStats.getIndices().getFieldData().getFields()) { table.startRow(); table.addCell(nodeStats.getNode().getId()); table.addCell(nodeStats.getNode().getHostName()); table.addCell(nodeStats.getNode().getHostAddress()); table.addCell(nodeStats.getNode().getName()); - table.addCell(cursor.key); - table.addCell(new ByteSizeValue(cursor.value)); + table.addCell(cursor.getKey()); + table.addCell(new ByteSizeValue(cursor.getValue())); table.endRow(); } } diff --git a/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java b/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java index f955413907714..b94ff77a1d0b7 100644 --- a/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java @@ -46,7 +46,7 @@ import java.time.ZoneId; -import static com.carrotsearch.hppc.BitMixer.mix32; +import static org.opensearch.common.util.BitMixer.mix32; /** * Utilities for scoring scripts diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java index 25f0fa7bc9b3e..2d517a1220e98 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -31,7 +31,6 @@ package org.opensearch.search.aggregations.bucket.nested; -import com.carrotsearch.hppc.LongArrayList; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; @@ -58,6 +57,8 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Map; /** @@ -169,7 +170,7 @@ class BufferingNestedLeafBucketCollector extends LeafBucketCollectorBase { final BitSet parentDocs; final LeafBucketCollector sub; final DocIdSetIterator childDocs; - final LongArrayList bucketBuffer = new LongArrayList(); + final List bucketBuffer = new ArrayList<>(); Scorable scorer; int currentParentDoc = -1; @@ -221,10 +222,8 @@ void processBufferedChildBuckets() throws IOException { for (; childDocId < currentParentDoc; childDocId = childDocs.nextDoc()) { cachedScorer.doc = childDocId; - final long[] buffer = bucketBuffer.buffer; - final int size = bucketBuffer.size(); - for (int i = 0; i < size; i++) { - collectBucket(sub, childDocId, buffer[i]); + for (var bucket : bucketBuffer) { + collectBucket(sub, childDocId, bucket); } } 
bucketBuffer.clear(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index 3e18ac6844ed2..8b0429d2379c6 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -31,7 +31,6 @@ package org.opensearch.search.aggregations.bucket.nested; -import com.carrotsearch.hppc.LongIntHashMap; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Query; @@ -51,6 +50,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.HashMap; import java.util.Map; /** @@ -91,7 +91,7 @@ protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final Leaf if (parentDocs == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } - final LongIntHashMap bucketOrdToLastCollectedParentDoc = new LongIntHashMap(32); + final Map bucketOrdToLastCollectedParentDoc = new HashMap<>(32); return new LeafBucketCollectorBase(sub, null) { @Override public void collect(int childDoc, long bucket) throws IOException { @@ -99,16 +99,15 @@ public void collect(int childDoc, long bucket) throws IOException { final int parentDoc = parentDocs.nextSetBit(childDoc); assert childDoc <= parentDoc && parentDoc != DocIdSetIterator.NO_MORE_DOCS; - int keySlot = bucketOrdToLastCollectedParentDoc.indexOf(bucket); - if (bucketOrdToLastCollectedParentDoc.indexExists(keySlot)) { - int lastCollectedParentDoc = bucketOrdToLastCollectedParentDoc.indexGet(keySlot); + Integer lastCollectedParentDoc = bucketOrdToLastCollectedParentDoc.get(bucket); + if (lastCollectedParentDoc != null) { if (parentDoc > lastCollectedParentDoc) { collectBucket(sub, parentDoc, bucket); - bucketOrdToLastCollectedParentDoc.indexReplace(keySlot, parentDoc); + bucketOrdToLastCollectedParentDoc.put(bucket, parentDoc); } } else { collectBucket(sub, parentDoc, bucket); - bucketOrdToLastCollectedParentDoc.indexInsert(keySlot, bucket, parentDoc); + bucketOrdToLastCollectedParentDoc.put(bucket, parentDoc); } } }; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java index 8f0e50f2348ae..f8061bcaca50f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -31,9 +31,6 @@ package org.opensearch.search.aggregations.bucket.terms; -import com.carrotsearch.hppc.BitMixer; -import com.carrotsearch.hppc.LongHashSet; -import com.carrotsearch.hppc.LongSet; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -48,6 +45,7 @@ import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.util.BitMixer; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -200,15 +198,15 @@ public boolean accept(long value) { * @opensearch.internal */ public static class SetBackedLongFilter 
extends LongFilter { - private LongSet valids; - private LongSet invalids; + private Set valids; + private Set invalids; private SetBackedLongFilter(int numValids, int numInvalids) { if (numValids > 0) { - valids = new LongHashSet(numValids); + valids = new HashSet<>(numValids); } if (numInvalids > 0) { - invalids = new LongHashSet(numInvalids); + invalids = new HashSet<>(numInvalids); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java index 77d8c3cdcf46b..faae96b957b2d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.metrics; -import com.carrotsearch.hppc.BitMixer; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; @@ -44,6 +43,7 @@ import org.opensearch.common.hash.MurmurHash3; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitArray; +import org.opensearch.common.util.BitMixer; import org.opensearch.common.util.LongArray; import org.opensearch.common.util.ObjectArray; import org.opensearch.common.lease.Releasable; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregator.java index 5242461fa0054..ee3cd2963c334 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.metrics; -import com.carrotsearch.hppc.LongObjectHashMap; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; @@ -68,6 +66,7 @@ import org.opensearch.search.sort.SortAndFormats; import java.io.IOException; +import java.util.HashMap; import java.util.Map; /** @@ -129,7 +128,7 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol // when post collecting then we have already replaced the leaf readers on the aggregator level have already been // replaced with the next leaf readers and then post collection pushes docids of the previous segment, which // then causes assertions to trip or incorrect top docs to be computed. 
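The TopHitsAggregator change just below also drops the ObjectCursor indirection: iterating values() of a java.util.Map yields the elements directly, so cursor.value disappears. A minimal sketch with a stand-in collector interface (names are hypothetical):

import java.util.HashMap;
import java.util.Map;

final class CursorIterationSketch {
    interface Collector {
        void setScorer(Object scorer);
    }

    final Map<Long, Collector> leafCollectors = new HashMap<>(1);

    void propagateScorer(Object scorer) {
        // was: for (ObjectCursor<LeafCollector> cursor : leafCollectors.values())
        //          cursor.value.setScorer(scorer);
        for (Collector collector : leafCollectors.values()) {
            collector.setScorer(scorer);
        }
    }
}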
- final LongObjectHashMap leafCollectors = new LongObjectHashMap<>(1); + final Map leafCollectors = new HashMap<>(1); return new LeafBucketCollectorBase(sub, null) { Scorable scorer; @@ -138,8 +137,8 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol public void setScorer(Scorable scorer) throws IOException { this.scorer = scorer; super.setScorer(scorer); - for (ObjectCursor cursor : leafCollectors.values()) { - cursor.value.setScorer(scorer); + for (var collector : leafCollectors.values()) { + collector.setScorer(scorer); } } @@ -170,16 +169,13 @@ public void collect(int docId, long bucket) throws IOException { topDocsCollectors.put(bucket, collectors); } - final LeafCollector leafCollector; - final int key = leafCollectors.indexOf(bucket); - if (key < 0) { + LeafCollector leafCollector = leafCollectors.get(bucket); + if (leafCollector == null) { leafCollector = collectors.collector.getLeafCollector(ctx); if (scorer != null) { leafCollector.setScorer(scorer); } - leafCollectors.indexInsert(key, bucket, leafCollector); - } else { - leafCollector = leafCollectors.indexGet(key); + leafCollectors.put(bucket, leafCollector); } leafCollector.collect(docId); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java index 66e99d4a70418..b5f75350c1f1c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java @@ -32,18 +32,17 @@ package org.opensearch.search.aggregations.pipeline; -import com.carrotsearch.hppc.DoubleArrayList; - import org.opensearch.LegacyESVersion; import org.opensearch.core.ParseField; -import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -183,11 +182,11 @@ protected PercentilesBucketPipelineAggregationBuilder buildFactory( protected boolean token(XContentParser parser, String field, XContentParser.Token token, Map params) throws IOException { if (PERCENTS_FIELD.match(field, parser.getDeprecationHandler()) && token == XContentParser.Token.START_ARRAY) { - DoubleArrayList percents = new DoubleArrayList(10); + final List percents = new ArrayList<>(10); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { percents.add(parser.doubleValue()); } - params.put(PERCENTS_FIELD.getPreferredName(), percents.toArray()); + params.put(PERCENTS_FIELD.getPreferredName(), percents.stream().mapToDouble(Double::doubleValue).toArray()); return true; } else if (KEYED_FIELD.match(field, parser.getDeprecationHandler()) && token == XContentParser.Token.VALUE_BOOLEAN) { params.put(KEYED_FIELD.getPreferredName(), parser.booleanValue()); diff --git a/server/src/main/java/org/opensearch/search/dfs/AggregatedDfs.java b/server/src/main/java/org/opensearch/search/dfs/AggregatedDfs.java index 0223699b7fd89..35ab0c0701b4e 100644 --- a/server/src/main/java/org/opensearch/search/dfs/AggregatedDfs.java +++ 
b/server/src/main/java/org/opensearch/search/dfs/AggregatedDfs.java @@ -32,17 +32,17 @@ package org.opensearch.search.dfs; -import com.carrotsearch.hppc.ObjectObjectHashMap; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; -import org.opensearch.common.collect.HppcMaps; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; /** * Compute global distributed frequency across the index @@ -51,25 +51,26 @@ */ public class AggregatedDfs implements Writeable { - private ObjectObjectHashMap termStatistics; - private ObjectObjectHashMap fieldStatistics; + private Map termStatistics; + private Map fieldStatistics; private long maxDoc; public AggregatedDfs(StreamInput in) throws IOException { int size = in.readVInt(); - termStatistics = HppcMaps.newMap(size); + final Map termStatistics = new HashMap<>(size); for (int i = 0; i < size; i++) { Term term = new Term(in.readString(), in.readBytesRef()); TermStatistics stats = new TermStatistics(in.readBytesRef(), in.readVLong(), DfsSearchResult.subOne(in.readVLong())); termStatistics.put(term, stats); } - fieldStatistics = DfsSearchResult.readFieldStats(in); + this.termStatistics = Collections.unmodifiableMap(termStatistics); + this.fieldStatistics = DfsSearchResult.readFieldStats(in); maxDoc = in.readVLong(); } public AggregatedDfs( - ObjectObjectHashMap termStatistics, - ObjectObjectHashMap fieldStatistics, + final Map termStatistics, + final Map fieldStatistics, long maxDoc ) { this.termStatistics = termStatistics; @@ -77,11 +78,11 @@ public AggregatedDfs( this.maxDoc = maxDoc; } - public ObjectObjectHashMap termStatistics() { + public Map termStatistics() { return termStatistics; } - public ObjectObjectHashMap fieldStatistics() { + public Map fieldStatistics() { return fieldStatistics; } @@ -93,11 +94,11 @@ public long maxDoc() { public void writeTo(final StreamOutput out) throws IOException { out.writeVInt(termStatistics.size()); - for (ObjectObjectCursor c : termStatistics()) { - Term term = c.key; + for (final Map.Entry c : termStatistics().entrySet()) { + Term term = c.getKey(); out.writeString(term.field()); out.writeBytesRef(term.bytes()); - TermStatistics stats = c.value; + TermStatistics stats = c.getValue(); out.writeBytesRef(stats.term()); out.writeVLong(stats.docFreq()); out.writeVLong(DfsSearchResult.addOne(stats.totalTermFreq())); diff --git a/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java b/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java index 6814fbd6e32c5..5094145ea1c6e 100644 --- a/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java @@ -32,14 +32,12 @@ package org.opensearch.search.dfs; -import com.carrotsearch.hppc.ObjectObjectHashMap; import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermStatistics; -import org.opensearch.common.collect.HppcMaps; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.rescore.RescoreContext; import 
org.opensearch.tasks.TaskCancelledException; @@ -58,7 +56,7 @@ public class DfsPhase { public void execute(SearchContext context) { try { - ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(); + Map fieldStatistics = new HashMap<>(); Map stats = new HashMap<>(); IndexSearcher searcher = new IndexSearcher(context.searcher().getIndexReader()) { @Override diff --git a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java index c236d59946172..09e8af8833975 100644 --- a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java @@ -32,14 +32,11 @@ package org.opensearch.search.dfs; -import com.carrotsearch.hppc.ObjectObjectHashMap; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.util.BytesRef; import org.opensearch.LegacyESVersion; -import org.opensearch.common.collect.HppcMaps; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.search.SearchPhaseResult; @@ -48,6 +45,9 @@ import org.opensearch.search.internal.ShardSearchRequest; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; /** * Result from a Distributed Frequency Search phase @@ -60,7 +60,7 @@ public class DfsSearchResult extends SearchPhaseResult { private static final TermStatistics[] EMPTY_TERM_STATS = new TermStatistics[0]; private Term[] terms; private TermStatistics[] termStatistics; - private ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(); + private Map fieldStatistics; private int maxDoc; public DfsSearchResult(StreamInput in) throws IOException { @@ -76,7 +76,7 @@ public DfsSearchResult(StreamInput in) throws IOException { } } this.termStatistics = readTermStats(in, terms); - fieldStatistics = readFieldStats(in); + this.fieldStatistics = readFieldStats(in); maxDoc = in.readVInt(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { @@ -105,8 +105,8 @@ public DfsSearchResult termsStatistics(Term[] terms, TermStatistics[] termStatis return this; } - public DfsSearchResult fieldStatistics(ObjectObjectHashMap fieldStatistics) { - this.fieldStatistics = fieldStatistics; + public DfsSearchResult fieldStatistics(final Map fieldStatistics) { + this.fieldStatistics = Collections.unmodifiableMap(fieldStatistics); return this; } @@ -118,7 +118,7 @@ public TermStatistics[] termStatistics() { return termStatistics; } - public ObjectObjectHashMap fieldStatistics() { + public Map fieldStatistics() { return fieldStatistics; } @@ -138,13 +138,12 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static void writeFieldStats(StreamOutput out, ObjectObjectHashMap fieldStatistics) - throws IOException { + public static void writeFieldStats(StreamOutput out, final Map fieldStatistics) throws IOException { out.writeVInt(fieldStatistics.size()); - for (ObjectObjectCursor c : fieldStatistics) { - out.writeString(c.key); - CollectionStatistics statistics = c.value; + for (final Map.Entry c : fieldStatistics.entrySet()) { + out.writeString(c.getKey()); + CollectionStatistics statistics = c.getValue(); assert statistics.maxDoc() >= 0; out.writeVLong(statistics.maxDoc()); if 
diff --git a/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java b/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java
index 8c009b9f5f672..85d003db5726f 100644
--- a/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java
+++ b/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java
@@ -32,7 +32,6 @@

 package org.opensearch.search.slice;

-import com.carrotsearch.hppc.BitMixer;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
@@ -44,6 +43,7 @@
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.TwoPhaseIterator;
 import org.apache.lucene.search.Weight;
+import org.opensearch.common.util.BitMixer;

 import java.io.IOException;

diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java
index 9914990a73863..fc8a8c1017698 100644
--- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java
+++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java
@@ -31,8 +31,6 @@

 package org.opensearch.snapshots;

-import com.carrotsearch.hppc.IntHashSet;
-import com.carrotsearch.hppc.IntSet;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
@@ -510,7 +508,7 @@ public ClusterState execute(ClusterState currentState) {
                     }
                     // Check that the index is closed or doesn't exist
                     IndexMetadata currentIndexMetadata = currentState.metadata().index(renamedIndexName);
-                    IntSet ignoreShards = new IntHashSet();
+                    Set<Integer> ignoreShards = new HashSet<>();
                     final Index renamedIndex;
                     if (currentIndexMetadata == null) {
                         // Index doesn't exist - create it and start recovery
@@ -708,7 +706,7 @@ private void checkAliasNameConflicts(Map<String, String> renamedIndices, Set<St
-        private void populateIgnoredShards(String index, IntSet ignoreShards) {
+        private void populateIgnoredShards(String index, Set<Integer> ignoreShards) {
             for (SnapshotShardFailure failure : snapshotInfo.shardFailures()) {
                 if (index.equals(failure.index())) {
                     ignoreShards.add(failure.shardId());
diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java
index f243cf392bbb8..bcd3004188bb6 100644
--- a/server/src/main/java/org/opensearch/tasks/TaskManager.java
+++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java
@@ -32,8 +32,6 @@

 package org.opensearch.tasks;

-import com.carrotsearch.hppc.ObjectIntHashMap;
-import com.carrotsearch.hppc.ObjectIntMap;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
@@ -80,7 +78,6 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
-import java.util.stream.StreamSupport;

 import static org.opensearch.common.unit.TimeValue.timeValueMillis;
 import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
@@ -584,7 +581,7 @@ private static class CancellableTaskHolder {
         private final CancellableTask task;
         private boolean finished = false;
         private List<Runnable> cancellationListeners = null;
-        private ObjectIntMap<DiscoveryNode> childTasksPerNode = null;
+        private Map<DiscoveryNode, Integer> childTasksPerNode = null;
         private boolean banChildren = false;
         private List<Runnable> childTaskCompletedListeners = null;

@@ -666,15 +663,15 @@ synchronized void registerChildNode(DiscoveryNode node) {
                 throw new TaskCancelledException("The parent task was cancelled, shouldn't start any child tasks");
             }
             if (childTasksPerNode == null) {
-                childTasksPerNode = new ObjectIntHashMap<>();
+                childTasksPerNode = new HashMap<>();
             }
-            childTasksPerNode.addTo(node, 1);
+            childTasksPerNode.merge(node, 1, Integer::sum);
         }

         void unregisterChildNode(DiscoveryNode node) {
             final List<Runnable> listeners;
             synchronized (this) {
-                if (childTasksPerNode.addTo(node, -1) == 0) {
+                if (childTasksPerNode.merge(node, -1, Integer::sum) == 0) {
                     childTasksPerNode.remove(node);
                 }
                 if (childTasksPerNode.isEmpty() && this.childTaskCompletedListeners != null) {
@@ -695,9 +692,7 @@ Set<DiscoveryNode> startBan(Runnable onChildTasksCompleted) {
                 if (childTasksPerNode == null) {
                     pendingChildNodes = Collections.emptySet();
                 } else {
-                    pendingChildNodes = StreamSupport.stream(childTasksPerNode.spliterator(), false)
-                        .map(e -> e.key)
-                        .collect(Collectors.toSet());
+                    pendingChildNodes = Set.copyOf(childTasksPerNode.keySet());
                 }
                 if (pendingChildNodes.isEmpty()) {
                     assert childTaskCompletedListeners == null;
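The TaskManager hunk replaces HPPC's addTo(key, delta) with Map.merge, which handles both the insert-if-absent and the increment case in one call; the compute(key, (k, v) -> v == null ? 1 : v + 1) form used in later test hunks is equivalent. A small self-contained sketch of the counting idiom (node names are made up):

import java.util.HashMap;
import java.util.Map;

class MergeCounterSketch {
    public static void main(String[] args) {
        final Map<String, Integer> counts = new HashMap<>();
        // before (HPPC): counts.addTo(node, 1) bumped a primitive int in place
        // after: merge() inserts 1 on first sight, otherwise applies Integer::sum
        for (String node : new String[] { "n1", "n2", "n1" }) {
            counts.merge(node, 1, Integer::sum);
        }
        // decrement-and-remove, mirroring unregisterChildNode above
        if (counts.merge("n1", -1, Integer::sum) == 0) {
            counts.remove("n1");
        }
        System.out.println(counts); // {n1=1, n2=1}
    }
}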
diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java
index 0ea383d6d48d7..75676307f4026 100644
--- a/server/src/main/java/org/opensearch/transport/TcpTransport.java
+++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java
@@ -31,8 +31,6 @@

 package org.opensearch.transport;

-import com.carrotsearch.hppc.IntHashSet;
-import com.carrotsearch.hppc.IntSet;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
@@ -524,12 +522,12 @@ static int resolvePublishPort(ProfileSettings profileSettings, List<InetSocketA
-            final IntSet ports = new IntHashSet();
+            final Set<Integer> ports = new HashSet<>();
             for (InetSocketAddress boundAddress : boundAddresses) {
                 ports.add(boundAddress.getPort());
             }
             if (ports.size() == 1) {
-                publishPort = ports.iterator().next().value;
+                publishPort = ports.iterator().next();
             }
         }

diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java
index d29c9cb30a09c..e1838f7605816 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java
@@ -37,7 +37,6 @@
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.UUIDs;
 import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.common.collect.ImmutableOpenIntMap;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
@@ -59,10 +58,10 @@ public class IndicesShardStoreResponseTests extends OpenSearchTestCase {

     public void testBasicSerialization() throws Exception {
-        final Map<String, ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>>> indexStoreStatuses = new HashMap<>();
+        final Map<String, Map<Integer, List<IndicesShardStoresResponse.StoreStatus>>> indexStoreStatuses = new HashMap<>();

         List<IndicesShardStoresResponse.Failure> failures = new ArrayList<>();

-        ImmutableOpenIntMap.Builder<List<IndicesShardStoresResponse.StoreStatus>> storeStatuses = ImmutableOpenIntMap.builder();
+        final Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> storeStatuses = new HashMap<>();

         DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
@@ -88,7 +87,7 @@ public void testBasicSerialization() throws Exception {
         );
         storeStatuses.put(0, storeStatusList);
         storeStatuses.put(1, storeStatusList);
-        ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> storesMap = storeStatuses.build();
+        final Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> storesMap = Collections.unmodifiableMap(storeStatuses);
         indexStoreStatuses.put("test", storesMap);
         indexStoreStatuses.put("test2", storesMap);
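The ImmutableOpenIntMap.Builder/build() pairs throughout the commit become a plain HashMap populated in place and then wrapped. A sketch of the pattern, with the caveat that unmodifiableMap is only a view (the string values here are placeholders):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

class UnmodifiableMapBuilderSketch {
    public static void main(String[] args) {
        // before: builder.put(...); map = builder.build();
        // after: populate a HashMap, then publish a read-only view
        final Map<Integer, String> building = new HashMap<>();
        building.put(0, "shard-0 stores");
        building.put(1, "shard-1 stores");
        final Map<Integer, String> published = Collections.unmodifiableMap(building);
        System.out.println(published.get(1));
        // note: writes through `building` remain visible through the view;
        // drop the original reference (or use Map.copyOf) for true immutability
    }
}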
diff --git a/server/src/test/java/org/opensearch/action/support/ActiveShardCountTests.java b/server/src/test/java/org/opensearch/action/support/ActiveShardCountTests.java
index 302f3c1efc394..991d118e5243b 100644
--- a/server/src/test/java/org/opensearch/action/support/ActiveShardCountTests.java
+++ b/server/src/test/java/org/opensearch/action/support/ActiveShardCountTests.java
@@ -32,7 +32,6 @@

 package org.opensearch.action.support;

-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
@@ -231,8 +230,8 @@ private ClusterState startPrimaries(final ClusterState clusterState, final Strin
         RoutingTable routingTable = clusterState.routingTable();
         IndexRoutingTable indexRoutingTable = routingTable.index(indexName);
         IndexRoutingTable.Builder newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             for (ShardRouting shardRouting : shardRoutingTable.getShards()) {
                 if (shardRouting.primary()) {
                     shardRouting = shardRouting.initialize(randomAlphaOfLength(8), null, shardRouting.getExpectedShardSize())
@@ -249,8 +248,8 @@ private ClusterState startLessThanWaitOnShards(final ClusterState clusterState,
         RoutingTable routingTable = clusterState.routingTable();
         IndexRoutingTable indexRoutingTable = routingTable.index(indexName);
         IndexRoutingTable.Builder newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             assert shardRoutingTable.getSize() > 2;
             int numToStart = numShardsToStart;
             // want less than half, and primary is already started
@@ -275,8 +274,8 @@ private ClusterState startWaitOnShards(final ClusterState clusterState, final St
         RoutingTable routingTable = clusterState.routingTable();
         IndexRoutingTable indexRoutingTable = routingTable.index(indexName);
         IndexRoutingTable.Builder newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             assert shardRoutingTable.getSize() > 2;
             int numToStart = numShardsToStart;
             for (ShardRouting shardRouting : shardRoutingTable.getShards()) {
@@ -304,8 +303,8 @@ private ClusterState startAllShards(final ClusterState clusterState, final Strin
         RoutingTable routingTable = clusterState.routingTable();
         IndexRoutingTable indexRoutingTable = routingTable.index(indexName);
         IndexRoutingTable.Builder newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             for (ShardRouting shardRouting : shardRoutingTable.getShards()) {
                 if (shardRouting.primary()) {
                     assertTrue(shardRouting.active());
diff --git a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java
index fde0ebe4b4e30..0e8af8bb0a9a8 100644
--- a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java
+++ b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java
@@ -31,8 +31,6 @@

 package org.opensearch.cluster.health;

-import com.carrotsearch.hppc.cursors.IntObjectCursor;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.opensearch.Version;
 import org.opensearch.action.admin.cluster.health.ClusterHealthRequest;
 import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
@@ -56,7 +54,6 @@
 import org.opensearch.cluster.routing.allocation.AllocationService;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.UUIDs;
-import org.opensearch.common.collect.ImmutableOpenIntMap;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.common.settings.Settings;
@@ -76,8 +73,10 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
@@ -416,8 +415,8 @@ private List<ClusterState> generateClusterStates(
         RoutingTable routingTable = originalClusterState.routingTable();
         IndexRoutingTable indexRoutingTable = routingTable.index(indexName);
         IndexRoutingTable.Builder newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             for (final ShardRouting shardRouting : shardRoutingTable.getShards()) {
                 if (shardRouting.primary()) {
                     newIndexRoutingTable.addShard(shardRouting.initialize(randomFrom(nodeIds), null, shardRouting.getExpectedShardSize()));
@@ -433,13 +432,13 @@ private List<ClusterState> generateClusterStates(
         // some primaries started
         indexRoutingTable = routingTable.index(indexName);
         newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        ImmutableOpenIntMap.Builder<Set<String>> allocationIds = ImmutableOpenIntMap.<Set<String>>builder();
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        Map<Integer, Set<String>> allocationIds = new HashMap<>();
+        for (var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             for (final ShardRouting shardRouting : shardRoutingTable.getShards()) {
                 if (shardRouting.primary() && randomBoolean()) {
                     final ShardRouting newShardRouting = shardRouting.moveToStarted();
-                    allocationIds.fPut(newShardRouting.getId(), Sets.newHashSet(newShardRouting.allocationId().getId()));
+                    allocationIds.put(newShardRouting.getId(), Set.of(newShardRouting.allocationId().getId()));
                     newIndexRoutingTable.addShard(newShardRouting);
                 } else {
                     newIndexRoutingTable.addShard(shardRouting);
@@ -447,10 +446,8 @@ private List<ClusterState> generateClusterStates(
             }
         }
         routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
-        IndexMetadata.Builder idxMetaBuilder = IndexMetadata.builder(clusterState.metadata().index(indexName));
-        for (final IntObjectCursor<Set<String>> entry : allocationIds.build()) {
-            idxMetaBuilder.putInSyncAllocationIds(entry.key, entry.value);
-        }
+        final IndexMetadata.Builder idxMetaBuilder = IndexMetadata.builder(clusterState.metadata().index(indexName));
+        allocationIds.forEach(idxMetaBuilder::putInSyncAllocationIds);
         Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(idxMetaBuilder);
         clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metadata(metadataBuilder).build();
         clusterStates.add(clusterState);
@@ -460,8 +457,8 @@ private List<ClusterState> generateClusterStates(
         // some primaries failed to allocate
         indexRoutingTable = routingTable.index(indexName);
         newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             for (final ShardRouting shardRouting : shardRoutingTable.getShards()) {
                 if (shardRouting.primary() && (shardRouting.started() == false || alreadyFailedPrimary == false)) {
                     newIndexRoutingTable.addShard(
@@ -481,13 +478,13 @@ private List<ClusterState> generateClusterStates(
         // all primaries started
         indexRoutingTable = routingTable.index(indexName);
         newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        allocationIds = ImmutableOpenIntMap.<Set<String>>builder();
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        allocationIds = new HashMap<>();
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             for (final ShardRouting shardRouting : shardRoutingTable.getShards()) {
                 if (shardRouting.primary() && shardRouting.started() == false) {
                     final ShardRouting newShardRouting = shardRouting.moveToStarted();
-                    allocationIds.fPut(newShardRouting.getId(), Sets.newHashSet(newShardRouting.allocationId().getId()));
+                    allocationIds.put(newShardRouting.getId(), Set.of(newShardRouting.allocationId().getId()));
                     newIndexRoutingTable.addShard(newShardRouting);
                 } else {
                     newIndexRoutingTable.addShard(shardRouting);
@@ -495,19 +492,17 @@ private List<ClusterState> generateClusterStates(
             }
         }
         routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
-        idxMetaBuilder = IndexMetadata.builder(clusterState.metadata().index(indexName));
-        for (final IntObjectCursor<Set<String>> entry : allocationIds.build()) {
-            idxMetaBuilder.putInSyncAllocationIds(entry.key, entry.value);
-        }
-        metadataBuilder = Metadata.builder(clusterState.metadata()).put(idxMetaBuilder);
+        final IndexMetadata.Builder idxMetaBuilder2 = IndexMetadata.builder(clusterState.metadata().index(indexName));
+        allocationIds.forEach(idxMetaBuilder2::putInSyncAllocationIds);
+        metadataBuilder = Metadata.builder(clusterState.metadata()).put(idxMetaBuilder2);
         clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metadata(metadataBuilder).build();
         clusterStates.add(clusterState);

         // initialize replicas
         indexRoutingTable = routingTable.index(indexName);
         newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId();
             Set<String> allocatedNodes = new HashSet<>();
             allocatedNodes.add(primaryNodeId);
@@ -528,8 +523,8 @@ private List<ClusterState> generateClusterStates(
         // some replicas started
         indexRoutingTable = routingTable.index(indexName);
         newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             for (final ShardRouting shardRouting : shardRoutingTable.getShards()) {
                 if (shardRouting.primary() == false && randomBoolean()) {
                     newIndexRoutingTable.addShard(shardRouting.moveToStarted());
@@ -545,8 +540,8 @@ private List<ClusterState> generateClusterStates(
         boolean replicaStateChanged = false;
         indexRoutingTable = routingTable.index(indexName);
         newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             for (final ShardRouting shardRouting : shardRoutingTable.getShards()) {
                 if (shardRouting.primary() == false && shardRouting.started() == false) {
                     newIndexRoutingTable.addShard(shardRouting.moveToStarted());
@@ -568,10 +563,10 @@ private List<ClusterState> generateClusterStates(
     // returns true if the inactive primaries in the index are only due to cluster recovery
     // (not because of allocation of existing shard or previously having allocation ids assigned)
     private boolean primaryInactiveDueToRecovery(final String indexName, final ClusterState clusterState) {
-        for (final IntObjectCursor<IndexShardRoutingTable> shardRouting : clusterState.routingTable().index(indexName).shards()) {
-            final ShardRouting primaryShard = shardRouting.value.primaryShard();
+        for (final var shardRouting : clusterState.routingTable().index(indexName).shards().entrySet()) {
+            final ShardRouting primaryShard = shardRouting.getValue().primaryShard();
             if (primaryShard.active() == false) {
-                if (clusterState.metadata().index(indexName).inSyncAllocationIds(shardRouting.key).isEmpty() == false) {
+                if (clusterState.metadata().index(indexName).inSyncAllocationIds(shardRouting.getKey()).isEmpty() == false) {
                     return false;
                 }
                 if (primaryShard.recoverySource() != null
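Where the old code drained an IntObjectCursor into the metadata builder one entry at a time, the new code hands the whole map to a BiConsumer via forEach plus a method reference. A tiny sketch (the println stands in for putInSyncAllocationIds):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

class ForEachBuilderSketch {
    public static void main(String[] args) {
        final Map<Integer, Set<String>> allocationIds = new HashMap<>();
        allocationIds.put(0, Set.of("alloc-a"));
        allocationIds.put(1, Set.of("alloc-b"));
        // before: for (IntObjectCursor<Set<String>> e : map) builder.putInSyncAllocationIds(e.key, e.value);
        allocationIds.forEach((shardId, ids) -> System.out.println("shard " + shardId + " -> " + ids));
        // with a real builder: allocationIds.forEach(idxMetaBuilder::putInSyncAllocationIds);
    }
}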
diff --git a/server/src/test/java/org/opensearch/cluster/routing/UnassignedInfoTests.java b/server/src/test/java/org/opensearch/cluster/routing/UnassignedInfoTests.java
index 28bf99deec6f4..741bf8e34c0e3 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/UnassignedInfoTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/UnassignedInfoTests.java
@@ -32,7 +32,6 @@

 package org.opensearch.cluster.routing;

-import com.carrotsearch.hppc.IntHashSet;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterName;
@@ -58,6 +57,7 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
@@ -202,7 +202,7 @@ public void testNewIndexRestored() {
                         Version.CURRENT,
                         new IndexId("test", UUIDs.randomBase64UUID(random()))
                     ),
-                    new IntHashSet()
+                    new HashSet<>()
                 )
                 .build()
         )
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java
index 7c45b20ecee1f..ed178ed7e1526 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java
@@ -8,7 +8,6 @@

 package org.opensearch.cluster.routing.allocation;

-import com.carrotsearch.hppc.ObjectIntHashMap;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.routing.RoutingNode;
 import org.opensearch.cluster.routing.RoutingNodes;
@@ -16,6 +15,7 @@
 import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.cluster.routing.allocation.allocator.RemoteShardsBalancer;

+import java.util.HashMap;
 import java.util.Map;

 public class RemoteShardsAllocateUnassignedTests extends RemoteShardsBalancerBaseTestCase {
@@ -68,7 +68,7 @@ public void testPrimaryAllocation() {

         assertEquals(0, routingNodes.unassigned().size());

-        ObjectIntHashMap<String> nodePrimariesCounter = new ObjectIntHashMap<>();
+        final Map<String, Integer> nodePrimariesCounter = new HashMap<>();
         for (ShardRouting shard : clusterState.getRoutingTable().allShards()) {
             assertFalse(shard.unassigned());
             RoutingNode node = routingNodes.node(shard.currentNodeId());
@@ -80,11 +80,11 @@ public void testPrimaryAllocation() {
             if (RoutingPool.getNodePool(node) == RoutingPool.REMOTE_CAPABLE
                 && RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation))
                 && shard.primary()) {
-                nodePrimariesCounter.putOrAdd(node.nodeId(), 1, 1);
+                nodePrimariesCounter.compute(node.nodeId(), (k, v) -> (v == null) ? 1 : v + 1);
             }
         }
         final int indexShardLimit = (int) Math.ceil(totalPrimaries(remoteIndices) / (float) remoteCapableNodes);
-        for (int primaries : nodePrimariesCounter.values) {
+        for (int primaries : nodePrimariesCounter.values()) {
             assertTrue(primaries <= indexShardLimit);
         }
     }
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java
index c4358aaf12ac2..ef9ae90e18bb5 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java
@@ -8,13 +8,15 @@

 package org.opensearch.cluster.routing.allocation;

-import com.carrotsearch.hppc.ObjectIntHashMap;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.routing.RoutingNode;
 import org.opensearch.cluster.routing.RoutingNodes;
 import org.opensearch.cluster.routing.RoutingPool;
 import org.opensearch.cluster.routing.ShardRouting;

+import java.util.HashMap;
+import java.util.Map;
+
 public class RemoteShardsRebalanceShardsTests extends RemoteShardsBalancerBaseTestCase {

     /**
@@ -33,8 +35,8 @@ public void testShardAllocationAndRebalance() {
         RoutingNodes routingNodes = clusterState.getRoutingNodes();
         RoutingAllocation allocation = getRoutingAllocation(clusterState, routingNodes);

-        ObjectIntHashMap<String> nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true);
-        ObjectIntHashMap<String> nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false);
+        final Map<String, Integer> nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true);
+        final Map<String, Integer> nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false);
         int avgPrimariesPerNode = getTotalShardCountAcrossNodes(nodePrimariesCounter) / remoteCapableNodes;

         // Primary and replica are balanced post first reroute
@@ -46,23 +48,23 @@ public void testShardAllocationAndRebalance() {
         }
     }

-    private ObjectIntHashMap<String> getShardCounterPerNodeForRemoteCapablePool(
+    private Map<String, Integer> getShardCounterPerNodeForRemoteCapablePool(
         ClusterState clusterState,
         RoutingAllocation allocation,
         boolean primary
     ) {
-        ObjectIntHashMap<String> nodePrimariesCounter = new ObjectIntHashMap<>();
+        final Map<String, Integer> nodePrimariesCounter = new HashMap<>();
         for (ShardRouting shard : clusterState.getRoutingTable().allShards()) {
             if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) && shard.primary() == primary) {
-                nodePrimariesCounter.putOrAdd(shard.currentNodeId(), 1, 1);
+                nodePrimariesCounter.compute(shard.currentNodeId(), (k, v) -> (v == null) ? 1 : v + 1);
             }
         }
         return nodePrimariesCounter;
     }
-    private int getTotalShardCountAcrossNodes(ObjectIntHashMap<String> nodePrimariesCounter) {
+    private int getTotalShardCountAcrossNodes(final Map<String, Integer> nodePrimariesCounter) {
         int totalShardCount = 0;
-        for (int value : nodePrimariesCounter.values) {
+        for (int value : nodePrimariesCounter.values()) {
             totalShardCount += value;
         }
         return totalShardCount;
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/ThrottlingAllocationTests.java
index e4bbe3c71d918..1ccadcade26bb 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/ThrottlingAllocationTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/ThrottlingAllocationTests.java
@@ -32,7 +32,6 @@

 package org.opensearch.cluster.routing.allocation;

-import com.carrotsearch.hppc.IntHashSet;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.opensearch.Version;
@@ -804,7 +803,7 @@ private ClusterState createRecoveryStateAndInitializeAllocations(
                         Version.CURRENT,
                         new IndexId(indexMetadata.getIndex().getName(), UUIDs.randomBase64UUID(random()))
                     ),
-                    new IntHashSet()
+                    new HashSet<>()
                 );
                 break;
             case 4:
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
index 6069414b27690..99bc6476aa228 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
@@ -32,7 +32,6 @@

 package org.opensearch.cluster.routing.allocation.decider;

-import com.carrotsearch.hppc.IntHashSet;
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterInfo;
 import org.opensearch.cluster.ClusterInfoService;
@@ -1339,7 +1338,7 @@ public void testDiskThresholdWithSnapshotShardSizes() {
             .addAsNewRestore(
                 metadata.index("test"),
                 new RecoverySource.SnapshotRecoverySource("_restore_uuid", snapshot, Version.CURRENT, indexId),
-                new IntHashSet()
+                new HashSet<>()
             )
             .build();
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java
index 8cca157210c37..7e2f258cd0021 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java
@@ -31,7 +31,6 @@

 package org.opensearch.cluster.routing.allocation.decider;

-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
@@ -145,8 +144,8 @@ public void testCanAllocatePrimaryExistingInRestoreInProgress() {
         IndexRoutingTable indexRoutingTable = routingTable.index("test");
         IndexRoutingTable.Builder newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
-        for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
-            final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
+        for (final var shardEntry : indexRoutingTable.getShards().values()) {
+            final IndexShardRoutingTable shardRoutingTable = shardEntry;
             for (ShardRouting shardRouting : shardRoutingTable.getShards()) {
                 if (shardRouting.primary()) {
                     newIndexRoutingTable.addShard(primary);
diff --git a/server/src/test/java/org/opensearch/common/FieldMemoryStatsTests.java b/server/src/test/java/org/opensearch/common/FieldMemoryStatsTests.java
index 2d895f0a10412..e2c155dd47d80 100644
--- a/server/src/test/java/org/opensearch/common/FieldMemoryStatsTests.java
+++ b/server/src/test/java/org/opensearch/common/FieldMemoryStatsTests.java
@@ -31,12 +31,13 @@

 package org.opensearch.common;

-import com.carrotsearch.hppc.ObjectLongHashMap;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.test.OpenSearchTestCase;

 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;

 public class FieldMemoryStatsTests extends OpenSearchTestCase {

@@ -54,21 +55,21 @@ public void testHashCodeEquals() {
         FieldMemoryStats stats = randomFieldMemoryStats();
         assertEquals(stats, stats);
         assertEquals(stats.hashCode(), stats.hashCode());
-        ObjectLongHashMap<String> map1 = new ObjectLongHashMap<>();
-        map1.put("bar", 1);
+        final Map<String, Long> map1 = new HashMap<>();
+        map1.put("bar", 1L);
         FieldMemoryStats stats1 = new FieldMemoryStats(map1);
-        ObjectLongHashMap<String> map2 = new ObjectLongHashMap<>();
-        map2.put("foo", 2);
+        final Map<String, Long> map2 = new HashMap<>();
+        map2.put("foo", 2L);
         FieldMemoryStats stats2 = new FieldMemoryStats(map2);
-        ObjectLongHashMap<String> map3 = new ObjectLongHashMap<>();
-        map3.put("foo", 2);
-        map3.put("bar", 1);
+        final Map<String, Long> map3 = new HashMap<>();
+        map3.put("foo", 2L);
+        map3.put("bar", 1L);
         FieldMemoryStats stats3 = new FieldMemoryStats(map3);
-        ObjectLongHashMap<String> map4 = new ObjectLongHashMap<>();
-        map4.put("foo", 2);
-        map4.put("bar", 1);
+        final Map<String, Long> map4 = new HashMap<>();
+        map4.put("foo", 2L);
+        map4.put("bar", 1L);
         FieldMemoryStats stats4 = new FieldMemoryStats(map4);

         assertNotEquals(stats1, stats2);
@@ -83,21 +84,21 @@ public void testHashCodeEquals() {
     }

     public void testAdd() {
-        ObjectLongHashMap<String> map1 = new ObjectLongHashMap<>();
-        map1.put("bar", 1);
+        final Map<String, Long> map1 = new HashMap<>();
+        map1.put("bar", 1L);
         FieldMemoryStats stats1 = new FieldMemoryStats(map1);
-        ObjectLongHashMap<String> map2 = new ObjectLongHashMap<>();
-        map2.put("foo", 2);
+        final Map<String, Long> map2 = new HashMap<>();
+        map2.put("foo", 2L);
         FieldMemoryStats stats2 = new FieldMemoryStats(map2);
-        ObjectLongHashMap<String> map3 = new ObjectLongHashMap<>();
-        map3.put("bar", 1);
+        final Map<String, Long> map3 = new HashMap<>();
+        map3.put("bar", 1L);
         FieldMemoryStats stats3 = new FieldMemoryStats(map3);
         stats3.add(stats1);
-        ObjectLongHashMap<String> map4 = new ObjectLongHashMap<>();
-        map4.put("foo", 2);
-        map4.put("bar", 2);
+        final Map<String, Long> map4 = new HashMap<>();
+        map4.put("foo", 2L);
+        map4.put("bar", 2L);
         FieldMemoryStats stats4 = new FieldMemoryStats(map4);
         assertNotEquals(stats3, stats4);
         stats3.add(stats2);
@@ -105,7 +106,7 @@ public void testAdd() {
     }

     public static FieldMemoryStats randomFieldMemoryStats() {
-        ObjectLongHashMap<String> map = new ObjectLongHashMap<>();
+        final Map<String, Long> map = new HashMap<>();
         int keys = randomIntBetween(1, 1000);
         for (int i = 0; i < keys; i++) {
             map.put(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), randomNonNegativeLong());
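Note the 1 → 1L changes above: ObjectLongHashMap.put accepted a primitive int that widened to long, but Map<String, Long>.put needs a boxed Long, and an int argument does not autobox to Long. A two-line illustration:

import java.util.HashMap;
import java.util.Map;

class LongLiteralSketch {
    public static void main(String[] args) {
        final Map<String, Long> map = new HashMap<>();
        map.put("bar", 1L); // OK: the long literal boxes to Long
        // map.put("bar", 1); // does not compile: int does not box to Long
        System.out.println(map);
    }
}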
a/server/src/test/java/org/opensearch/common/hppc/HppcMapsTests.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.common.hppc;
-
-import com.carrotsearch.hppc.ObjectHashSet;
-import org.opensearch.core.Assertions;
-import org.opensearch.common.collect.HppcMaps;
-import org.opensearch.test.OpenSearchTestCase;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.hamcrest.Matchers.equalTo;
-
-public class HppcMapsTests extends OpenSearchTestCase {
-    public void testIntersection() throws Exception {
-        assumeTrue("assertions enabled", Assertions.ENABLED);
-        ObjectHashSet<String> set1 = ObjectHashSet.from("1", "2", "3");
-        ObjectHashSet<String> set2 = ObjectHashSet.from("1", "2", "3");
-        List<String> values = toList(HppcMaps.intersection(set1, set2));
-        assertThat(values.size(), equalTo(3));
-        assertThat(values.contains("1"), equalTo(true));
-        assertThat(values.contains("2"), equalTo(true));
-        assertThat(values.contains("3"), equalTo(true));
-
-        set1 = ObjectHashSet.from("1", "2", "3");
-        set2 = ObjectHashSet.from("3", "4", "5");
-        values = toList(HppcMaps.intersection(set1, set2));
-        assertThat(values.size(), equalTo(1));
-        assertThat(values.get(0), equalTo("3"));
-
-        set1 = ObjectHashSet.from("1", "2", "3");
-        set2 = ObjectHashSet.from("4", "5", "6");
-        values = toList(HppcMaps.intersection(set1, set2));
-        assertThat(values.size(), equalTo(0));
-
-        set1 = ObjectHashSet.from();
-        set2 = ObjectHashSet.from("3", "4", "5");
-        values = toList(HppcMaps.intersection(set1, set2));
-        assertThat(values.size(), equalTo(0));
-
-        set1 = ObjectHashSet.from("1", "2", "3");
-        set2 = ObjectHashSet.from();
-        values = toList(HppcMaps.intersection(set1, set2));
-        assertThat(values.size(), equalTo(0));
-
-        set1 = ObjectHashSet.from();
-        set2 = ObjectHashSet.from();
-        values = toList(HppcMaps.intersection(set1, set2));
-        assertThat(values.size(), equalTo(0));
-
-        set1 = null;
-        set2 = ObjectHashSet.from();
-        try {
-            toList(HppcMaps.intersection(set1, set2));
-            fail();
-        } catch (AssertionError e) {}
-
-        set1 = ObjectHashSet.from();
-        set2 = null;
-        try {
-            toList(HppcMaps.intersection(set1, set2));
-            fail();
-        } catch (AssertionError e) {}
-
-        set1 = null;
-        set2 = null;
-        try {
-            toList(HppcMaps.intersection(set1, set2));
-            fail();
-        } catch (AssertionError e) {}
-    }
-
-    private List<String> toList(Iterable<String> iterable) {
-        List<String> list = new ArrayList<>();
-        for (String s : iterable) {
-            list.add(s);
-        }
-        return list;
-    }
-
-}
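With HppcMaps.intersection and its test gone, callers can express the same set intersection with java.util collections; retainAll on a copy is the usual idiom. A minimal sketch:

import java.util.HashSet;
import java.util.Set;

class IntersectionSketch {
    public static void main(String[] args) {
        final Set<String> set1 = Set.of("1", "2", "3");
        final Set<String> set2 = Set.of("3", "4", "5");
        final Set<String> intersection = new HashSet<>(set1); // copy so inputs stay untouched
        intersection.retainAll(set2); // keep only elements also present in set2
        System.out.println(intersection); // [3]
    }
}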
diff --git a/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java b/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java
index 1859a7d4a8f9d..8b719283ed71d 100644
--- a/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java
+++ b/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java
@@ -32,9 +32,6 @@

 package org.opensearch.common.util;

-import com.carrotsearch.hppc.ObjectLongHashMap;
-import com.carrotsearch.hppc.ObjectLongMap;
-import com.carrotsearch.hppc.cursors.ObjectLongCursor;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.tests.util.TestUtil;
@@ -77,7 +74,7 @@ public void testDuel() {
         for (int i = 0; i < values.length; ++i) {
             values[i] = new BytesRef(randomAlphaOfLength(5));
         }
-        final ObjectLongMap<BytesRef> valueToId = new ObjectLongHashMap<>();
+        final Map<BytesRef, Long> valueToId = new HashMap<>();
         final BytesRef[] idToValue = new BytesRef[values.length];
         final int iters = randomInt(1000000);
         for (int i = 0; i < iters; ++i) {
@@ -92,8 +89,8 @@ public void testDuel() {
         }

         assertEquals(valueToId.size(), hash.size());
-        for (final ObjectLongCursor<BytesRef> next : valueToId) {
-            assertEquals(next.value, hash.find(next.key, next.key.hashCode()));
+        for (final var next : valueToId.entrySet()) {
+            assertEquals(next.getValue().longValue(), hash.find(next.getKey(), next.getKey().hashCode()));
         }

         for (long i = 0; i < hash.capacity(); ++i) {
diff --git a/server/src/test/java/org/opensearch/common/util/LongHashTests.java b/server/src/test/java/org/opensearch/common/util/LongHashTests.java
index aabc3c295e6fa..295497b9f188c 100644
--- a/server/src/test/java/org/opensearch/common/util/LongHashTests.java
+++ b/server/src/test/java/org/opensearch/common/util/LongHashTests.java
@@ -32,17 +32,12 @@

 package org.opensearch.common.util;

-import com.carrotsearch.hppc.LongLongHashMap;
-import com.carrotsearch.hppc.LongLongMap;
-import com.carrotsearch.hppc.cursors.LongLongCursor;
-
 import org.opensearch.common.settings.Settings;
 import org.opensearch.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.test.OpenSearchTestCase;

 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;

@@ -74,7 +69,7 @@ public void testDuell() {
         for (int i = 0; i < values.length; ++i) {
             values[i] = randomLong();
         }
-        final LongLongMap valueToId = new LongLongHashMap();
+        final Map<Long, Long> valueToId = new HashMap<>();
         final long[] idToValue = new long[values.length];
         final int iters = randomInt(1000000);
         for (int i = 0; i < iters; ++i) {
@@ -89,9 +84,8 @@ public void testDuell() {
         }

         assertEquals(valueToId.size(), hash.size());
-        for (Iterator<LongLongCursor> iterator = valueToId.iterator(); iterator.hasNext();) {
-            final LongLongCursor next = iterator.next();
-            assertEquals(next.value, hash.find(next.key));
+        for (var iterator : valueToId.entrySet()) {
+            assertEquals(iterator.getValue().longValue(), hash.find(iterator.getKey()));
         }

         for (long i = 0; i < hash.capacity(); ++i) {
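The .longValue() calls in these assertions are deliberate: with a Map<BytesRef, Long> or Map<Long, Long>, getValue() returns a boxed Long, and unboxing it keeps the comparison on the primitive assertEquals(long, long) overload rather than assertEquals(Object, Object). A tiny illustration:

class UnboxedCompareSketch {
    public static void main(String[] args) {
        final Long expected = 42L; // boxed, as a Map value would be
        final long actual = 42L;   // primitive, as hash.find(...) returns
        // expected.longValue() == actual compares primitives directly;
        // passing both to an Object-based equality check would box `actual` first
        System.out.println(expected.longValue() == actual); // true
    }
}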
diff --git a/server/src/test/java/org/opensearch/common/util/LongObjectHashMapTests.java b/server/src/test/java/org/opensearch/common/util/LongObjectHashMapTests.java
index f99f1f8a9a1d5..73a6fc5d54883 100644
--- a/server/src/test/java/org/opensearch/common/util/LongObjectHashMapTests.java
+++ b/server/src/test/java/org/opensearch/common/util/LongObjectHashMapTests.java
@@ -32,11 +32,13 @@

 package org.opensearch.common.util;

-import com.carrotsearch.hppc.LongObjectHashMap;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.test.OpenSearchTestCase;

+import java.util.HashMap;
+import java.util.Map;
+
 public class LongObjectHashMapTests extends OpenSearchTestCase {

     private BigArrays randomBigArrays() {
@@ -44,7 +46,7 @@ private BigArrays randomBigArrays() {
     }

     public void testDuel() {
-        final LongObjectHashMap<Object> map1 = new LongObjectHashMap<>();
+        final Map<Long, Object> map1 = new HashMap<>();
         final LongObjectPagedHashMap<Object> map2 = new LongObjectPagedHashMap<>(
             randomInt(42),
             0.6f + randomFloat() * 0.39f,
@@ -66,10 +68,10 @@ public void testDuel() {
                 assertEquals(map1.size(), map2.size());
             }
         }
-        for (int i = 0; i <= maxKey; ++i) {
+        for (long i = 0; i <= maxKey; ++i) {
             assertSame(map1.get(i), map2.get(i));
         }
-        final LongObjectHashMap<Object> copy = new LongObjectHashMap<>();
+        final Map<Long, Object> copy = new HashMap<>();
         for (LongObjectPagedHashMap.Cursor<Object> cursor : map2) {
             copy.put(cursor.key, cursor.value);
         }
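The for (int i ...) → for (long i ...) change above matters more than it looks: with a Map<Long, Object>, map1.get(i) boxes an int i to an Integer, which never equals a Long key, so every lookup would silently return null. Looping with long makes the key box to Long. A sketch of the trap:

import java.util.HashMap;
import java.util.Map;

class BoxedKeySketch {
    public static void main(String[] args) {
        final Map<Long, String> map = new HashMap<>();
        map.put(5L, "five");
        int i = 5;
        long l = 5;
        System.out.println(map.get(i)); // null: Integer(5) is not equal to the Long key
        System.out.println(map.get(l)); // five
    }
}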
diff --git a/server/src/test/java/org/opensearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/opensearch/index/engine/CombinedDeletionPolicyTests.java
index dac6fa90bd917..dd469644709ec 100644
--- a/server/src/test/java/org/opensearch/index/engine/CombinedDeletionPolicyTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/CombinedDeletionPolicyTests.java
@@ -32,7 +32,6 @@

 package org.opensearch.index.engine;

-import com.carrotsearch.hppc.LongArrayList;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.store.Directory;
 import org.opensearch.index.seqno.RetentionLeases;
@@ -74,7 +73,7 @@ public void testKeepCommitsAfterGlobalCheckpoint() throws Exception {
         TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy();
         CombinedDeletionPolicy indexPolicy = newCombinedDeletionPolicy(translogPolicy, softDeletesPolicy, globalCheckpoint);

-        final LongArrayList maxSeqNoList = new LongArrayList();
+        final List<Long> maxSeqNoList = new ArrayList<>();
         final List<IndexCommit> commitList = new ArrayList<>();
         int totalCommits = between(2, 20);
         long lastMaxSeqNo = 0;
diff --git a/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java
index 5e6d51527e264..88c5e2b49b900 100644
--- a/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java
+++ b/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java
@@ -172,7 +172,7 @@ public void testSimpleOverFlow() {
         assertThat(tracker.processedCheckpoint.get(), equalTo(maxOps - 1L));
         assertThat(tracker.processedSeqNo.size(), equalTo(aligned ? 0 : 1));
         if (aligned == false) {
-            assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE));
+            assertThat(tracker.processedSeqNo.keySet().iterator().next(), equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE));
         }
         assertThat(tracker.hasProcessed(randomFrom(seqNoList)), equalTo(true));
         final long notCompletedSeqNo = randomValueOtherThanMany(seqNoList::contains, OpenSearchTestCase::randomNonNegativeLong);
@@ -218,7 +218,7 @@ protected void doRun() throws Exception {
         assertThat(tracker.getProcessedCheckpoint(), equalTo(maxOps - 1L));
         assertThat(tracker.processedSeqNo.size(), is(oneOf(0, 1)));
         if (tracker.processedSeqNo.size() == 1) {
-            assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE));
+            assertThat(tracker.processedSeqNo.keySet().iterator().next(), equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE));
         }
     }

@@ -272,7 +272,7 @@ protected void doRun() throws Exception {
         assertThat(tracker.hasProcessed(randomLongBetween(maxOps, Long.MAX_VALUE)), equalTo(false));
         assertThat(tracker.processedSeqNo.size(), is(oneOf(0, 1)));
         if (tracker.processedSeqNo.size() == 1) {
-            assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE));
+            assertThat(tracker.processedSeqNo.keySet().iterator().next(), equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE));
         }
     }
diff --git a/server/src/test/java/org/opensearch/index/translog/MultiSnapshotTests.java b/server/src/test/java/org/opensearch/index/translog/MultiSnapshotTests.java
index 783c42f1749c0..b5bef0c028665 100644
--- a/server/src/test/java/org/opensearch/index/translog/MultiSnapshotTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/MultiSnapshotTests.java
@@ -32,12 +32,12 @@

 package org.opensearch.index.translog;

-import com.carrotsearch.hppc.LongHashSet;
-import com.carrotsearch.hppc.LongSet;
 import org.opensearch.common.Randomness;
 import org.opensearch.test.OpenSearchTestCase;

+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import java.util.stream.LongStream;
@@ -60,7 +60,7 @@ public void testTrackSeqNoSimpleRange() throws Exception {

     public void testTrackSeqNoDenseRanges() throws Exception {
         final MultiSnapshot.SeqNoSet bitSet = new MultiSnapshot.SeqNoSet();
-        final LongSet normalSet = new LongHashSet();
+        final Set<Long> normalSet = new HashSet<>();
         IntStream.range(0, scaledRandomIntBetween(5_000, 10_000)).forEach(i -> {
             long seq = between(0, 5000);
             boolean existed = normalSet.add(seq) == false;
@@ -70,7 +70,7 @@ public void testTrackSeqNoDenseRanges() throws Exception {

     public void testTrackSeqNoSparseRanges() throws Exception {
         final MultiSnapshot.SeqNoSet bitSet = new MultiSnapshot.SeqNoSet();
-        final LongSet normalSet = new LongHashSet();
+        final Set<Long> normalSet = new HashSet<>();
         IntStream.range(0, scaledRandomIntBetween(5_000, 10_000)).forEach(i -> {
             long seq = between(i * 10_000, i * 30_000);
             boolean existed = normalSet.add(seq) == false;
@@ -80,7 +80,7 @@ public void testTrackSeqNoSparseRanges() throws Exception {

     public void testTrackSeqNoMimicTranslogRanges() throws Exception {
         final MultiSnapshot.SeqNoSet bitSet = new MultiSnapshot.SeqNoSet();
-        final LongSet normalSet = new LongHashSet();
+        final Set<Long> normalSet = new HashSet<>();
         long currentSeq = between(10_000_000, 1_000_000_000);
         final int iterations = scaledRandomIntBetween(100, 2000);
         for (long i = 0; i < iterations; i++) {
diff --git a/server/src/test/java/org/opensearch/index/translog/SnapshotMatchers.java b/server/src/test/java/org/opensearch/index/translog/SnapshotMatchers.java
index 0277e420f74ed..c9de270cf7568 100644
--- a/server/src/test/java/org/opensearch/index/translog/SnapshotMatchers.java
+++ b/server/src/test/java/org/opensearch/index/translog/SnapshotMatchers.java
@@ -32,8 +32,6 @@

 package org.opensearch.index.translog;

-import com.carrotsearch.hppc.LongHashSet;
-import com.carrotsearch.hppc.LongSet;
 import org.opensearch.OpenSearchException;
 import org.hamcrest.Description;
 import org.hamcrest.Matcher;
@@ -42,7 +40,9 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 import java.util.stream.Collectors;

 public final class SnapshotMatchers {
@@ -217,7 +217,7 @@ static class ContainingSeqNoRangeMatcher extends TypeSafeMatcher<Translog.Snapsh
-                final LongSet seqNoList = new LongHashSet();
+                final Set<Long> seqNoList = new HashSet<>();
                 Translog.Operation op;
                 while ((op = snapshot.next()) != null) {
                     seqNoList.add(op.seqNo());
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java
index 9307a70d57ca2..d9a83549f8a97 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java
@@ -45,8 +45,6 @@
 import org.opensearch.search.aggregations.bucket.range.BinaryRangeAggregator.SortedSetRangeLeafCollector;
 import org.opensearch.test.OpenSearchTestCase;

-import com.carrotsearch.hppc.LongHashSet;
-
 public class BinaryRangeAggregatorTests extends OpenSearchTestCase {

     private static class FakeSortedSetDocValues extends AbstractSortedSetDocValues {
@@ -121,12 +119,12 @@ protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws I
         final int[] expectedCounts = new int[ranges.length];
         final int maxDoc = randomIntBetween(5, 10);
         for (int doc = 0; doc < maxDoc; ++doc) {
-            LongHashSet ordinalSet = new LongHashSet();
+            Set<Long> ordinalSet = new HashSet<>();
             final int numValues = randomInt(maxNumValuesPerDoc);
             while (ordinalSet.size() < numValues) {
-                ordinalSet.add(random().nextInt(terms.length));
+                ordinalSet.add(TestUtil.nextLong(random(), 0, terms.length - 1));
             }
-            final long[] ords = ordinalSet.toArray();
+            final long[] ords = ordinalSet.stream().mapToLong(Long::longValue).toArray();
             Arrays.sort(ords);
             values.ords = ords;
@@ -222,12 +220,12 @@ protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws I
         final int[] expectedCounts = new int[ranges.length];
         final int maxDoc = randomIntBetween(5, 10);
         for (int doc = 0; doc < maxDoc; ++doc) {
-            LongHashSet ordinalSet = new LongHashSet();
+            Set<Long> ordinalSet = new HashSet<>();
             final int numValues = randomInt(maxNumValuesPerDoc);
             while (ordinalSet.size() < numValues) {
-                ordinalSet.add(random().nextInt(terms.length));
+                ordinalSet.add(TestUtil.nextLong(random(), 0, terms.length - 1));
             }
-            final long[] ords = ordinalSet.toArray();
+            final long[] ords = ordinalSet.stream().mapToLong(Long::longValue).toArray();
             Arrays.sort(ords);
             values.ords = ords;
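The aggregator test above switches the ordinal set to Set<Long>, so candidates must be produced as longs (hence TestUtil.nextLong replacing random().nextInt, whose int argument would not box to Long) and the primitive array now comes from a stream. A short sketch of that conversion:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class SortedLongArraySketch {
    public static void main(String[] args) {
        final Set<Long> ordinalSet = new HashSet<>(Set.of(7L, 1L, 5L));
        // unbox each Long and collect into a primitive long[]
        final long[] ords = ordinalSet.stream().mapToLong(Long::longValue).toArray();
        Arrays.sort(ords); // HashSet has no defined order, so sort after unboxing
        System.out.println(Arrays.toString(ords)); // [1, 5, 7]
    }
}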
diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java
index 52a32e6e134cc..036bb39e790ae 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java
@@ -32,11 +32,6 @@

 package org.opensearch.search.aggregations.metrics;

-import com.carrotsearch.hppc.ObjectIntHashMap;
-import com.carrotsearch.hppc.ObjectIntMap;
-import com.carrotsearch.hppc.ObjectObjectHashMap;
-import com.carrotsearch.hppc.ObjectObjectMap;
-
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.Strings;
@@ -54,7 +49,9 @@
 import org.opensearch.test.geo.RandomGeoGenerator;

 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;

 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@@ -79,8 +76,8 @@ public abstract class AbstractGeoTestCase extends OpenSearchIntegTestCase {
     protected static GeoPoint[] singleValues, multiValues;
     protected static GeoPoint singleTopLeft, singleBottomRight, multiTopLeft, multiBottomRight, singleCentroid, multiCentroid,
         unmappedCentroid;
-    protected static ObjectIntMap<String> expectedDocCountsForGeoHash = null;
-    protected static ObjectObjectMap<String, GeoPoint> expectedCentroidsForGeoHash = null;
+    protected static Map<String, Integer> expectedDocCountsForGeoHash = null;
+    protected static Map<String, GeoPoint> expectedCentroidsForGeoHash = null;
     protected static final double GEOHASH_TOLERANCE = 1E-5D;

     @Override
@@ -109,8 +106,8 @@ public void setupSuiteScopeCluster() throws Exception {
         numDocs = randomIntBetween(6, 20);
         numUniqueGeoPoints = randomIntBetween(1, numDocs);
-        expectedDocCountsForGeoHash = new ObjectIntHashMap<>(numDocs * 2);
-        expectedCentroidsForGeoHash = new ObjectObjectHashMap<>(numDocs * 2);
+        expectedDocCountsForGeoHash = new HashMap<>(numDocs * 2);
+        expectedCentroidsForGeoHash = new HashMap<>(numDocs * 2);

         singleValues = new GeoPoint[numUniqueGeoPoints];
         for (int i = 0; i < singleValues.length; i++) {
diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparseTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparseTests.java
index ce47862c9884b..5968605fc0c34 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparseTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparseTests.java
@@ -32,12 +32,12 @@

 package org.opensearch.search.aggregations.metrics;

-import com.carrotsearch.hppc.BitMixer;
 import org.opensearch.common.breaker.CircuitBreaker;
 import org.opensearch.common.breaker.CircuitBreakingException;
 import org.opensearch.common.breaker.NoopCircuitBreaker;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.BitMixer;
 import org.opensearch.indices.breaker.CircuitBreakerService;
 import org.opensearch.test.OpenSearchTestCase;
 import org.hamcrest.CoreMatchers;
diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java
index 0726621855b47..6f5f7494331a5 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java
@@ -32,15 +32,16 @@

 package org.opensearch.search.aggregations.metrics;

-import com.carrotsearch.hppc.BitMixer;
-import com.carrotsearch.hppc.IntHashSet;
 import org.opensearch.common.breaker.CircuitBreaker;
 import org.opensearch.common.breaker.CircuitBreakingException;
 import org.opensearch.common.breaker.NoopCircuitBreaker;
 import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.BitMixer;
 import org.opensearch.indices.breaker.CircuitBreakerService;
 import org.opensearch.test.OpenSearchTestCase;

+import java.util.HashSet;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;

 import static org.opensearch.search.aggregations.metrics.AbstractHyperLogLog.MAX_PRECISION;
@@ -80,7 +81,7 @@ public void testAccuracy() {
         final int numValues = randomIntBetween(1, 100000);
         final int maxValue = randomIntBetween(1, randomBoolean() ? 1000 : 100000);
         final int p = randomIntBetween(14, MAX_PRECISION);
-        IntHashSet set = new IntHashSet();
+        final Set<Integer> set = new HashSet<>();
         HyperLogLogPlusPlus e = new HyperLogLogPlusPlus(p, BigArrays.NON_RECYCLING_INSTANCE, 1);
         for (int i = 0; i < numValues; ++i) {
             final int n = randomInt(maxValue);
diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalCardinalityTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalCardinalityTests.java
index 8a513d9f45dde..a95533bb3c33c 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalCardinalityTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalCardinalityTests.java
@@ -32,8 +32,8 @@

 package org.opensearch.search.aggregations.metrics;

-import com.carrotsearch.hppc.BitMixer;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.BitMixer;
 import org.opensearch.common.util.MockBigArrays;
 import org.opensearch.common.util.MockPageCacheRecycler;
 import org.opensearch.common.lease.Releasables;
diff --git a/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java b/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java
index 2bf1225aa9b11..86dc27d73d21f 100644
--- a/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java
+++ b/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java
@@ -32,7 +32,6 @@

 package org.opensearch.search.slice;

-import com.carrotsearch.hppc.BitMixer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.SortedNumericDocValuesField;
@@ -49,6 +48,7 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.NumericUtils;
 import org.opensearch.common.UUIDs;
+import org.opensearch.common.util.BitMixer;
 import org.opensearch.test.OpenSearchTestCase;

 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java b/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java
index 8cfc11cb6f32c..88de234d3449b 100644
--- a/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java
+++ b/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.snapshots;
 
-import com.carrotsearch.hppc.IntHashSet;
 import org.opensearch.Version;
 import org.opensearch.action.support.PlainActionFuture;
 import org.opensearch.cluster.ClusterState;
@@ -72,6 +71,7 @@
 
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -457,7 +457,7 @@ private ClusterState addUnassignedShards(final ClusterState currentState, String
 
         final Index index = indexMetadata.getIndex();
         final RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
-        routingTable.add(IndexRoutingTable.builder(index).initializeAsNewRestore(indexMetadata, recoverySource, new IntHashSet()).build());
+        routingTable.add(IndexRoutingTable.builder(index).initializeAsNewRestore(indexMetadata, recoverySource, new HashSet<>()).build());
 
         final RestoreInProgress.Builder restores = new RestoreInProgress.Builder(
             currentState.custom(RestoreInProgress.TYPE, RestoreInProgress.EMPTY)
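The only choice at this call site is what to pass where an empty IntHashSet used to go. new HashSet<>() is the closest drop-in, since it is mutable and growable like the hppc set; Set.of() or Collections.emptySet() would also satisfy a read-only Set<Integer> parameter but reject mutation. A small sketch of that difference, independent of the OpenSearch routing types:

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    public class EmptySetChoiceSketch {
        public static void main(String[] args) {
            Set<Integer> mutable = new HashSet<>(); // matches new HashSet<>() in the diff
            mutable.add(0); // fine: HashSet accepts mutation, like IntHashSet did

            Set<Integer> immutable = Collections.emptySet();
            try {
                immutable.add(0); // throws: shared immutable instance
            } catch (UnsupportedOperationException expected) {
                System.out.println("emptySet() rejects mutation");
            }
        }
    }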
diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java
index 72434b33ae39f..3d3cff6f3be9b 100644
--- a/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java
+++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java
@@ -31,7 +31,6 @@
 
 package org.opensearch.cluster.coordination;
 
-import com.carrotsearch.hppc.LongObjectHashMap;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.FixedBitSet;
@@ -498,7 +497,7 @@ void unlift() {
      */
     private static class Cache {
         private final Map<Object, Set<FixedBitSet>> largeMap = new HashMap<>();
-        private final LongObjectHashMap<Set<Object>> smallMap = new LongObjectHashMap<>();
+        private final Map<Long, Set<Object>> smallMap = new HashMap<>();
         private final Map<Object, Object> internalizeStateMap = new HashMap<>();
         private final Map<Set<Object>, Set<Object>> statePermutations = new HashMap<>();
 
@@ -517,12 +516,11 @@ private boolean addInternal(Object state, FixedBitSet bitSet) {
         }
 
         private boolean addSmall(Object state, long bits) {
-            int index = smallMap.indexOf(bits);
-            Set<Object> states;
-            if (index < 0) {
-                states = Collections.singleton(state);
+            Set<Object> states = smallMap.get(bits);
+            if (states == null) {
+                states = Set.of(state);
             } else {
-                Set<Object> oldStates = smallMap.indexGet(index);
+                Set<Object> oldStates = states;
                 if (oldStates.contains(state)) return false;
                 states = new HashSet<>(oldStates.size() + 1);
                 states.addAll(oldStates);
@@ -532,12 +530,7 @@ private boolean addSmall(Object state, long bits) {
             // Get a unique set object per state permutation. We assume that the number of permutations of states are small.
             // We thus avoid the overhead of the set data structure.
             states = statePermutations.computeIfAbsent(states, k -> k);
-
-            if (index < 0) {
-                smallMap.indexInsert(index, bits, states);
-            } else {
-                smallMap.indexReplace(index, states);
-            }
+            smallMap.put(bits, states);
             return true;
         }
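hppc's indexOf()/indexGet()/indexInsert()/indexReplace() protocol saved one re-hash of the key between lookup and insert; with java.util the same logic collapses into a get() followed by a put(), because put() covers both the insert and replace branches. A condensed, self-contained restatement of the new control flow (the statePermutations interning step of the real method is omitted here):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class SmallMapSketch {
        private final Map<Long, Set<Object>> smallMap = new HashMap<>();

        boolean addSmall(Object state, long bits) {
            Set<Object> states = smallMap.get(bits); // one lookup replaces indexOf()/indexGet()
            if (states == null) {
                states = Set.of(state); // first state seen for these bits
            } else {
                if (states.contains(state)) {
                    return false; // already recorded
                }
                Set<Object> grown = new HashSet<>(states.size() + 1);
                grown.addAll(states); // stored sets are treated as immutable, so copy-on-write
                grown.add(state);
                states = grown;
            }
            smallMap.put(bits, states); // one put() covers indexInsert() and indexReplace()
            return true;
        }

        public static void main(String[] args) {
            SmallMapSketch cache = new SmallMapSketch();
            System.out.println(cache.addSmall("a", 3L)); // true
            System.out.println(cache.addSmall("b", 3L)); // true
            System.out.println(cache.addSmall("a", 3L)); // false: already present
        }
    }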
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
index d8c011a2448f7..e3a78548d5d06 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -31,8 +31,6 @@
 
 package org.opensearch.test;
 
-import com.carrotsearch.hppc.ObjectLongMap;
-import com.carrotsearch.hppc.cursors.IntObjectCursor;
 import com.carrotsearch.randomizedtesting.RandomizedTest;
 import com.carrotsearch.randomizedtesting.SeedUtils;
 import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
@@ -60,7 +58,6 @@
 import org.opensearch.cluster.node.DiscoveryNodeRole;
 import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.routing.IndexRoutingTable;
-import org.opensearch.cluster.routing.IndexShardRoutingTable;
 import org.opensearch.cluster.routing.OperationRouting;
 import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.cluster.routing.allocation.DiskThresholdSettings;
@@ -1492,14 +1489,14 @@ public void assertSeqNos() throws Exception {
         assertBusy(() -> {
             final ClusterState state = clusterService().state();
             for (final IndexRoutingTable indexRoutingTable : state.routingTable().indicesRouting().values()) {
-                for (IntObjectCursor<IndexShardRoutingTable> indexShardRoutingTable : indexRoutingTable.shards()) {
-                    ShardRouting primaryShardRouting = indexShardRoutingTable.value.primaryShard();
+                for (var indexShardRoutingTable : indexRoutingTable.shards().values()) {
+                    ShardRouting primaryShardRouting = indexShardRoutingTable.primaryShard();
                     final IndexShard primaryShard = getShardOrNull(state, primaryShardRouting);
                     if (primaryShard == null) {
                         continue; // just ignore - shard movement
                     }
                     final SeqNoStats primarySeqNoStats;
-                    final ObjectLongMap<String> syncGlobalCheckpoints;
+                    final Map<String, Long> syncGlobalCheckpoints;
                     try {
                         primarySeqNoStats = primaryShard.seqNoStats();
                         syncGlobalCheckpoints = primaryShard.getInSyncGlobalCheckpoints();
@@ -1511,7 +1508,7 @@ public void assertSeqNos() throws Exception {
                         primarySeqNoStats.getGlobalCheckpoint(),
                         not(equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO))
                     );
-                    for (ShardRouting replicaShardRouting : indexShardRoutingTable.value.replicaShards()) {
+                    for (ShardRouting replicaShardRouting : indexShardRoutingTable.replicaShards()) {
                         final IndexShard replicaShard = getShardOrNull(state, replicaShardRouting);
                         if (replicaShard == null) {
                             continue; // just ignore - shard movement
@@ -1544,8 +1541,8 @@ public void assertSameDocIdsOnShards() throws Exception {
         assertBusy(() -> {
             ClusterState state = client().admin().cluster().prepareState().get().getState();
             for (final IndexRoutingTable indexRoutingTable : state.routingTable().indicesRouting().values()) {
-                for (IntObjectCursor<IndexShardRoutingTable> indexShardRoutingTable : indexRoutingTable.shards()) {
-                    ShardRouting primaryShardRouting = indexShardRoutingTable.value.primaryShard();
+                for (var indexShardRoutingTable : indexRoutingTable.shards().values()) {
+                    ShardRouting primaryShardRouting = indexShardRoutingTable.primaryShard();
                     IndexShard primaryShard = getShardOrNull(state, primaryShardRouting);
                     if (primaryShard == null) {
                         continue;
@@ -1556,7 +1553,7 @@ public void assertSameDocIdsOnShards() throws Exception {
                     } catch (AlreadyClosedException ex) {
                         continue;
                     }
-                    for (ShardRouting replicaShardRouting : indexShardRoutingTable.value.replicaShards()) {
+                    for (ShardRouting replicaShardRouting : indexShardRoutingTable.replicaShards()) {
                         IndexShard replicaShard = getShardOrNull(state, replicaShardRouting);
                         if (replicaShard == null) {
                             continue;
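The last pattern in this file is iteration: hppc handed out IntObjectCursor objects, so loop bodies dereferenced .value, whereas shards() now returns a java.util map whose values() can be walked directly (which is also why the IndexShardRoutingTable import can go once var is used). A minimal sketch with a plain map standing in for the routing-table types:

    import java.util.HashMap;
    import java.util.Map;

    public class CursorIterationSketch {
        public static void main(String[] args) {
            // Stand-in for a shard-id-to-routing-table map; the String value
            // type here is purely for illustration.
            Map<Integer, String> shards = new HashMap<>();
            shards.put(0, "shard-0");
            shards.put(1, "shard-1");

            // hppc style, for comparison:
            //   for (IntObjectCursor<String> cursor : shards) {
            //       use(cursor.index, cursor.value);
            //   }
            // java.util style, as in the diff: iterate values() and drop .value.
            for (var shard : shards.values()) {
                System.out.println(shard);
            }
        }
    }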