diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java
index 500f1aa1719c3..34fd4ec27a3b3 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java
@@ -67,10 +67,9 @@ public void testConvertIntLeadingZero() throws Exception {
         assertThat(ingestDocument.getFieldValue(fieldName, Integer.class), equalTo(10));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32370")
     public void testConvertIntHexError() {
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
-        String value = "0x" + randomAlphaOfLengthBetween(1, 10);
+        String value = "0xnotanumber";
         String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, value);
         Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.INTEGER, false);
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument));
@@ -138,10 +137,9 @@ public void testConvertLongLeadingZero() throws Exception {
         assertThat(ingestDocument.getFieldValue(fieldName, Long.class), equalTo(10L));
     }
 
-    @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/32370")
     public void testConvertLongHexError() {
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
-        String value = "0x" + randomAlphaOfLengthBetween(1, 10);
+        String value = "0xnotanumber";
         String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, value);
         Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.LONG, false);
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument));
diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java
index a3f83609037e1..ba5c4cd929fd2 100644
--- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java
+++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.index.get;
 
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressorFactory;
 import org.elasticsearch.common.document.DocumentField;
@@ -30,6 +31,7 @@
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.IgnoredFieldMapper;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.search.lookup.SourceLookup;
 
@@ -225,10 +227,13 @@ public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params
                 }
             }
         }
         for (DocumentField field : metaFields) {
-            Object value = field.getValue();
-            builder.field(field.getName(), value);
+            // TODO: can we avoid having an exception here?
+            if (field.getName().equals(IgnoredFieldMapper.NAME)) {
+                builder.field(field.getName(), field.getValues());
+            } else {
+                builder.field(field.getName(), field.getValue());
+            }
         }
 
         builder.field(FOUND, exists);
@@ -316,7 +321,11 @@ public static GetResult fromXContentEmbedded(XContentParser parser, String index
                     parser.skipChildren(); // skip potential inner objects for forward compatibility
                 }
             } else if (token == XContentParser.Token.START_ARRAY) {
-                parser.skipChildren(); // skip potential inner arrays for forward compatibility
+                if (IgnoredFieldMapper.NAME.equals(currentFieldName)) {
+                    fields.put(currentFieldName, new DocumentField(currentFieldName, parser.list()));
+                } else {
+                    parser.skipChildren(); // skip potential inner arrays for forward compatibility
+                }
             }
         }
         return new GetResult(index, type, id, version, found, source, fields);
@@ -400,7 +409,12 @@ public boolean equals(Object o) {
 
     @Override
    public int hashCode() {
-        return Objects.hash(index, type, id, version, exists, fields, sourceAsMap());
+        return Objects.hash(version, exists, index, type, id, fields, sourceAsMap());
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this, true, true);
     }
 }
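Review note on the GetResult change above: `_ignored` is the one metadata field that legitimately carries several values (one entry per field that was dropped at index time), so it has to be rendered and parsed as a JSON array, while the other metadata fields keep their single-value rendering. Below is a minimal sketch of the two shapes; the field values and the `IgnoredRenderingSketch` class are invented purely for illustration and only mirror the behaviour, they are not part of the patch.

```java
import java.util.Arrays;
import java.util.Collections;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class IgnoredRenderingSketch {
    public static void main(String[] args) throws Exception {
        // single-valued metadata field: rendered via getValue(), i.e. as a scalar
        DocumentField routing = new DocumentField("_routing", Collections.<Object>singletonList("user_1"));
        // _ignored can name several malformed fields of the same document
        DocumentField ignored = new DocumentField("_ignored", Arrays.<Object>asList("ip_address", "event_date"));

        XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
        builder.field(routing.getName(), (Object) routing.getValue()); // "_routing": "user_1"
        builder.field(ignored.getName(), ignored.getValues());         // "_ignored": ["ip_address", "event_date"]
        builder.endObject();

        // prints: {"_routing":"user_1","_ignored":["ip_address","event_date"]}
        System.out.println(Strings.toString(builder));
    }
}
```

The `fromXContentEmbedded` hunk above is the inverse of this: on a START_ARRAY token for `_ignored` it collects the array into a `DocumentField` via `parser.list()` instead of skipping it.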
diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
index 472cb04936d64..e6a86d47f55c0 100644
--- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
+++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
@@ -420,6 +420,12 @@ private void removeShards(final ClusterState state) {
                     // state may result in a new shard being initialized while having the same allocation id as the currently started shard.
                     logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting);
                     indexService.removeShard(shardId.id(), "removing shard (stale copy)");
+                } else if (newShardRouting.primary() && currentRoutingEntry.primary() == false && newShardRouting.initializing()) {
+                    assert currentRoutingEntry.initializing() : currentRoutingEntry; // see above if clause
+                    // this can happen when cluster state batching batches activation of the shard, closing an index, reopening it
+                    // and assigning an initializing primary to this node
+                    logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting);
+                    indexService.removeShard(shardId.id(), "removing shard (stale copy)");
                 }
             }
         }
logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting); indexService.removeShard(shardId.id(), "removing shard (stale copy)"); + } else if (newShardRouting.primary() && currentRoutingEntry.primary() == false && newShardRouting.initializing()) { + assert currentRoutingEntry.initializing() : currentRoutingEntry; // see above if clause + // this can happen when cluster state batching batches activation of the shard, closing an index, reopening it + // and assigning an initializing primary to this node + logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting); + indexService.removeShard(shardId.id(), "removing shard (stale copy)"); } } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 8c688cbf4466a..66999c7e3899b 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -602,16 +602,24 @@ private static void declareMetaDataFields(ObjectParser, Void for (String metadatafield : MapperService.getAllMetaFields()) { if (metadatafield.equals(Fields._ID) == false && metadatafield.equals(Fields._INDEX) == false && metadatafield.equals(Fields._TYPE) == false) { - parser.declareField((map, field) -> { - @SuppressWarnings("unchecked") - Map fieldMap = (Map) map.computeIfAbsent(Fields.FIELDS, - v -> new HashMap()); - fieldMap.put(field.getName(), field); - }, (p, c) -> { - List values = new ArrayList<>(); - values.add(parseFieldsValue(p)); - return new DocumentField(metadatafield, values); - }, new ParseField(metadatafield), ValueType.VALUE); + if (metadatafield.equals(IgnoredFieldMapper.NAME)) { + parser.declareObjectArray((map, list) -> { + @SuppressWarnings("unchecked") + Map fieldMap = (Map) map.computeIfAbsent(Fields.FIELDS, + v -> new HashMap()); + DocumentField field = new DocumentField(metadatafield, list); + fieldMap.put(field.getName(), field); + }, (p, c) -> parseFieldsValue(p), + new ParseField(metadatafield)); + } else { + parser.declareField((map, field) -> { + @SuppressWarnings("unchecked") + Map fieldMap = (Map) map.computeIfAbsent(Fields.FIELDS, + v -> new HashMap()); + fieldMap.put(field.getName(), field); + }, (p, c) -> new DocumentField(metadatafield, Collections.singletonList(parseFieldsValue(p))), + new ParseField(metadatafield), ValueType.VALUE); + } } } } @@ -958,4 +966,9 @@ public int hashCode() { return Objects.hash(field, offset, child); } } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 59ede535c2f39..60053748d68c9 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -27,10 +27,12 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import 
diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
index 59ede535c2f39..60053748d68c9 100644
--- a/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
+++ b/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
@@ -27,10 +27,12 @@
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.AllocationId;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
 import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.RoutingTable.Builder;
+import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.TestShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
@@ -44,6 +46,7 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
@@ -93,7 +96,8 @@ public static ClusterState state(String index, boolean activePrimaryLocal, Shard
         IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
                 .put(SETTING_VERSION_CREATED, Version.CURRENT)
                 .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
-                .put(SETTING_CREATION_DATE, System.currentTimeMillis())).primaryTerm(0, primaryTerm).build();
+                .put(SETTING_CREATION_DATE, System.currentTimeMillis())).primaryTerm(0, primaryTerm)
+                .build();
 
         RoutingTable.Builder routing = new RoutingTable.Builder();
         routing.addAsNew(indexMetaData);
@@ -138,12 +142,19 @@ public static ClusterState state(String index, boolean activePrimaryLocal, Shard
                     TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, false, replicaState,
                             unassignedInfo));
         }
+        final IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingBuilder.build();
+
+        IndexMetaData.Builder indexMetaDataBuilder = new IndexMetaData.Builder(indexMetaData);
+        indexMetaDataBuilder.putInSyncAllocationIds(0,
+            indexShardRoutingTable.activeShards().stream().map(ShardRouting::allocationId).map(AllocationId::getId)
+                .collect(Collectors.toSet())
+        );
 
         ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
         state.nodes(discoBuilder);
-        state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded());
+        state.metaData(MetaData.builder().put(indexMetaDataBuilder.build(), false).generateClusterUuidIfNeeded());
         state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(indexMetaData.getIndex())
-                .addIndexShard(indexShardRoutingBuilder.build())).build());
+                .addIndexShard(indexShardRoutingTable)).build());
 
         return state.build();
     }
@@ -272,21 +283,21 @@ public static ClusterState stateWithAssignedPrimariesAndOneReplica(String index,
         state.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build());
         return state.build();
     }
-
-
+
+
     /**
      * Creates cluster state with several indexes, shards and replicas and all shards STARTED.
      */
     public static ClusterState stateWithAssignedPrimariesAndReplicas(String[] indices, int numberOfShards, int numberOfReplicas) {
-        int numberOfDataNodes = numberOfReplicas + 1;
+        int numberOfDataNodes = numberOfReplicas + 1;
         DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
         for (int i = 0; i < numberOfDataNodes + 1; i++) {
             final DiscoveryNode node = newNode(i);
             discoBuilder = discoBuilder.add(node);
         }
         discoBuilder.localNodeId(newNode(0).getId());
-        discoBuilder.masterNodeId(newNode(numberOfDataNodes + 1).getId());
+        discoBuilder.masterNodeId(newNode(numberOfDataNodes + 1).getId());
         ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
         state.nodes(discoBuilder);
         Builder routingTableBuilder = RoutingTable.builder();
@@ -316,7 +327,7 @@ public static ClusterState stateWithAssignedPrimariesAndReplicas(String[] indice
         state.metaData(metadataBuilder);
         state.routingTable(routingTableBuilder.build());
         return state.build();
-    }
+    }
 
     /**
      * Creates cluster state with and index that has one shard and as many replicas as numberOfReplicas.
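Review note on the test-helper change: without in-sync allocation IDs in the generated `IndexMetaData`, the started copies in these synthetic cluster states are not recognizable as in-sync copies, which the primary/replica replacement handling exercised by the new test relies on. A small sketch of what callers of the helper can now assert — the test class and index name below are illustrative only, not part of the patch:

```java
import java.util.Set;
import java.util.stream.Collectors;

import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.test.ESTestCase;

public class InSyncAllocationIdsSketchTests extends ESTestCase {

    public void testActiveShardsAreInSync() {
        // one started primary, one started replica
        ClusterState state = ClusterStateCreationUtils.state("idx", randomBoolean(),
            ShardRoutingState.STARTED, ShardRoutingState.STARTED);

        // in-sync allocation ids recorded in the index metadata for shard 0
        Set<String> inSync = state.metaData().index("idx").inSyncAllocationIds(0);

        // allocation ids of the copies that are actually active in the routing table
        Set<String> activeAllocationIds = state.routingTable().index("idx").shard(0).activeShards().stream()
            .map(ShardRouting::allocationId)
            .map(allocationId -> allocationId.getId())
            .collect(Collectors.toSet());

        // every active copy now shows up in the in-sync set of the index metadata
        assertEquals(activeAllocationIds, inSync);
    }
}
```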
diff --git a/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java b/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java
index 830e41996e9b3..e2e3d4df67cf7 100644
--- a/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java
+++ b/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java
@@ -26,7 +26,11 @@
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.mapper.RoutingFieldMapper;
+import org.elasticsearch.index.mapper.IdFieldMapper;
+import org.elasticsearch.index.mapper.IgnoredFieldMapper;
+import org.elasticsearch.index.mapper.IndexFieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.TypeFieldMapper;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.RandomObjects;
 
@@ -98,14 +102,28 @@ private static DocumentField mutateDocumentField(DocumentField documentField) {
 
     public static Tuple<DocumentField, DocumentField> randomDocumentField(XContentType xContentType) {
         if (randomBoolean()) {
-            String fieldName = randomFrom(RoutingFieldMapper.NAME);
-            DocumentField documentField = new DocumentField(fieldName, Collections.singletonList(randomAlphaOfLengthBetween(3, 10)));
+            String metaField = randomValueOtherThanMany(field -> field.equals(TypeFieldMapper.NAME)
+                    || field.equals(IndexFieldMapper.NAME) || field.equals(IdFieldMapper.NAME),
+                () -> randomFrom(MapperService.getAllMetaFields()));
+            DocumentField documentField;
+            if (metaField.equals(IgnoredFieldMapper.NAME)) {
+                int numValues = randomIntBetween(1, 3);
+                List<Object> ignoredFields = new ArrayList<>(numValues);
+                for (int i = 0; i < numValues; i++) {
+                    ignoredFields.add(randomAlphaOfLengthBetween(3, 10));
+                }
+                documentField = new DocumentField(metaField, ignoredFields);
+            } else {
+                //meta fields are single value only, besides _ignored
+                documentField = new DocumentField(metaField, Collections.singletonList(randomAlphaOfLengthBetween(3, 10)));
+            }
             return Tuple.tuple(documentField, documentField);
+        } else {
+            String fieldName = randomAlphaOfLengthBetween(3, 10);
+            Tuple<List<Object>, List<Object>> tuple = RandomObjects.randomStoredFieldValues(random(), xContentType);
+            DocumentField input = new DocumentField(fieldName, tuple.v1());
+            DocumentField expected = new DocumentField(fieldName, tuple.v2());
+            return Tuple.tuple(input, expected);
         }
-        String fieldName = randomAlphaOfLengthBetween(3, 10);
-        Tuple<List<Object>, List<Object>> tuple = RandomObjects.randomStoredFieldValues(random(), xContentType);
-        DocumentField input = new DocumentField(fieldName, tuple.v1());
-        DocumentField expected = new DocumentField(fieldName, tuple.v2());
-        return Tuple.tuple(input, expected);
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java b/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java
index a38d183299cdd..1cc2612041f46 100644
--- a/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java
+++ b/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java
@@ -90,7 +90,6 @@ public void testToAndFromXContentEmbedded() throws Exception {
         XContentType xContentType = randomFrom(XContentType.values());
         Tuple<GetResult, GetResult> tuple = randomGetResult(xContentType);
         GetResult getResult = tuple.v1();
-
         // We don't expect to retrieve the index/type/id of the GetResult because they are not rendered
         // by the toXContentEmbedded method.
         GetResult expectedGetResult = new GetResult(null, null, null, -1,
@@ -106,7 +105,6 @@ public void testToAndFromXContentEmbedded() throws Exception {
             parsedEmbeddedGetResult = GetResult.fromXContentEmbedded(parser);
             assertNull(parser.nextToken());
         }
-
         assertEquals(expectedGetResult, parsedEmbeddedGetResult);
         //print the parsed object out and test that the output is the same as the original output
         BytesReference finalBytes = toXContentEmbedded(parsedEmbeddedGetResult, xContentType, humanReadable);
@@ -203,16 +201,17 @@ public static Tuple<GetResult, GetResult> randomGetResult(XContentType xContentT
         return Tuple.tuple(getResult, expectedGetResult);
     }
 
-    private static Tuple<Map<String, DocumentField>, Map<String, DocumentField>> randomDocumentFields(XContentType xContentType) {
+    public static Tuple<Map<String, DocumentField>, Map<String, DocumentField>> randomDocumentFields(XContentType xContentType) {
         int numFields = randomIntBetween(2, 10);
         Map<String, DocumentField> fields = new HashMap<>(numFields);
         Map<String, DocumentField> expectedFields = new HashMap<>(numFields);
-        for (int i = 0; i < numFields; i++) {
+        while (fields.size() < numFields) {
             Tuple<DocumentField, DocumentField> tuple = randomDocumentField(xContentType);
             DocumentField getField = tuple.v1();
             DocumentField expectedGetField = tuple.v2();
-            fields.put(getField.getName(), getField);
-            expectedFields.put(expectedGetField.getName(), expectedGetField);
+            if (fields.putIfAbsent(getField.getName(), getField) == null) {
+                assertNull(expectedFields.putIfAbsent(expectedGetField.getName(), expectedGetField));
+            }
         }
         return Tuple.tuple(fields, expectedFields);
     }
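Review note on the `randomDocumentFields` change: now that `randomDocumentField` can return the same metadata field name on consecutive draws, a counted `for` loop could overwrite map entries and end up with fewer fields than intended. The `while (fields.size() < numFields)` plus `putIfAbsent` combination keeps drawing until the requested number of distinct keys exists. A framework-free sketch of that pattern (the class, keys and values below are made up for illustration):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Random;

public class UniqueKeysSketch {
    public static void main(String[] args) {
        Random random = new Random();
        // candidate keys deliberately collide, like meta field names drawn from a small fixed set
        String[] candidates = {"_routing", "_ignored", "_field_names"};

        int wanted = 3;
        Map<String, Integer> fields = new HashMap<>(wanted);
        // keep drawing until we really have `wanted` distinct keys;
        // putIfAbsent leaves existing entries untouched, so duplicates never shrink the map
        while (fields.size() < wanted) {
            String key = candidates[random.nextInt(candidates.length)];
            fields.putIfAbsent(key, random.nextInt(100));
        }
        System.out.println(fields);
    }
}
```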
diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
index 5c6b000f7e519..580696264bdd4 100644
--- a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
+++ b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
@@ -74,6 +74,10 @@ public void injectRandomFailures() {
         enableRandomFailures = randomBoolean();
     }
 
+    protected void disableRandomFailures() {
+        enableRandomFailures = false;
+    }
+
     protected void failRandomly() {
         if (enableRandomFailures && rarely()) {
             throw new RuntimeException("dummy test failure");
diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
index 5611421594aa1..39091ce04ec6e 100644
--- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
@@ -49,6 +49,7 @@
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.threadpool.TestThreadPool;
@@ -75,6 +76,7 @@
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -97,7 +99,6 @@ public void tearDown() throws Exception {
         terminate(threadPool);
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32308")
     public void testRandomClusterStateUpdates() {
         // we have an IndicesClusterStateService per node in the cluster
         final Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap = new HashMap<>();
@@ -199,6 +200,59 @@ public void testJoiningNewClusterOnlyRemovesInMemoryIndexStructures() {
         }
     }
 
+    /**
+     * In rare cases it is possible that a node gets an instruction to replace a replica
+     * shard that's in POST_RECOVERY with a new initializing primary with the same allocation id.
+     * This can happen by batching cluster states that include the starting of the replica, with
+     * closing the index, opening it up again and allocating the primary shard to the node in
+     * question. The node should then clean up its initializing replica and replace it with a new
+     * initializing primary.
+     */
+    public void testInitializingPrimaryRemovesInitializingReplicaWithSameAID() {
+        disableRandomFailures();
+        String index = "index_" + randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
+        ClusterState state = ClusterStateCreationUtils.state(index, randomBoolean(),
+            ShardRoutingState.STARTED, ShardRoutingState.INITIALIZING);
+
+        // the initial state which is derived from the newly created cluster state but doesn't contain the index
+        ClusterState previousState = ClusterState.builder(state)
+            .metaData(MetaData.builder(state.metaData()).remove(index))
+            .routingTable(RoutingTable.builder().build())
+            .build();
+
+        // pick a data node that has shards assigned to it, to simulate the add-index cluster state change event on
+        final ShardRouting shardRouting = state.routingTable().index(index).shard(0).replicaShards().get(0);
+        final ShardId shardId = shardRouting.shardId();
+        DiscoveryNode node = state.nodes().get(shardRouting.currentNodeId());
+
+        // simulate the cluster state change on the node
+        ClusterState localState = adaptClusterStateToLocalNode(state, node);
+        ClusterState previousLocalState = adaptClusterStateToLocalNode(previousState, node);
+        IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(node, RecordingIndicesService::new);
+        indicesCSSvc.start();
+        indicesCSSvc.applyClusterState(new ClusterChangedEvent("cluster state change that adds the index", localState, previousLocalState));
+        previousState = state;
+
+        // start the replica
+        state = cluster.applyStartedShards(state, state.routingTable().index(index).shard(0).replicaShards());
+
+        // close the index and open it up again (this will sometimes swap roles between primary and replica)
+        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metaData().index(index).getIndex().getName());
+        state = cluster.closeIndices(state, closeIndexRequest);
+        OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metaData().index(index).getIndex().getName());
+        state = cluster.openIndices(state, openIndexRequest);
+
+        localState = adaptClusterStateToLocalNode(state, node);
+        previousLocalState = adaptClusterStateToLocalNode(previousState, node);
+
+        indicesCSSvc.applyClusterState(new ClusterChangedEvent("new cluster state", localState, previousLocalState));
+
+        final MockIndexShard shardOrNull = ((RecordingIndicesService) indicesCSSvc.indicesService).getShardOrNull(shardId);
+        assertThat(shardOrNull == null ? null : shardOrNull.routingEntry(),
+            equalTo(state.getRoutingNodes().node(node.getId()).getByShardId(shardId)));
+
+    }
+
     public ClusterState randomInitialClusterState(Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap,
                                                   Supplier<MockIndicesService> indicesServiceSupplier) {
         List<DiscoveryNode> allNodes = new ArrayList<>();
diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java
index 97dfad4645447..87b8ba2dc59b0 100644
--- a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java
+++ b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java
@@ -19,23 +19,31 @@
 
 package org.elasticsearch.search;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Predicate;
+
 import org.apache.lucene.search.Explanation;
 import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.InputStreamStreamInput;
 import org.elasticsearch.common.text.Text;
-import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.get.GetResultTests;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.SearchHit.NestedIdentity;
 import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
@@ -43,16 +51,6 @@
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.RandomObjects;
 
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.Predicate;
-
 import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
 import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
@@ -63,8 +61,6 @@
 
 public class SearchHitTests extends ESTestCase {
 
-    private static Set<String> META_FIELDS = Sets.newHashSet("_uid", "_parent", "_routing", "_size", "_timestamp", "_ttl");
-
     public static SearchHit createTestItem(boolean withOptionalInnerHits) {
         int internalId = randomInt();
         String uid = randomAlphaOfLength(10);
@@ -75,18 +71,7 @@ public static SearchHit createTestItem(boolean withOptionalInnerHits) {
         }
         Map<String, DocumentField> fields = new HashMap<>();
         if (randomBoolean()) {
-            int size = randomIntBetween(0, 10);
-            for (int i = 0; i < size; i++) {
-                Tuple<List<Object>, List<Object>> values = RandomObjects.randomStoredFieldValues(random(),
-                    XContentType.JSON);
-                if (randomBoolean()) {
-                    String metaField = randomFrom(META_FIELDS);
-                    fields.put(metaField, new DocumentField(metaField, values.v1()));
-                } else {
-                    String fieldName = randomAlphaOfLengthBetween(5, 10);
-                    fields.put(fieldName, new DocumentField(fieldName, values.v1()));
-                }
-            }
+            fields = GetResultTests.randomDocumentFields(XContentType.JSON).v1();
         }
         SearchHit hit = new SearchHit(internalId, uid, type, nestedIdentity, fields);
         if (frequently()) {
@@ -109,7 +94,8 @@ public static SearchHit createTestItem(boolean withOptionalInnerHits) {
             int size = randomIntBetween(0, 5);
             Map<String, HighlightField> highlightFields = new HashMap<>(size);
             for (int i = 0; i < size; i++) {
-                highlightFields.put(randomAlphaOfLength(5), HighlightFieldTests.createTestItem());
+                HighlightField testItem = HighlightFieldTests.createTestItem();
+                highlightFields.put(testItem.getName(), testItem);
             }
             hit.highlightFields(highlightFields);
         }
diff --git a/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java b/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java
index d1a9a15a3937c..d69039b72f56a 100644
--- a/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java
+++ b/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java
@@ -46,13 +46,13 @@ public static SearchSortValues createTestItem() {
         List<Supplier<Object>> valueSuppliers = new ArrayList<>();
         // this should reflect all values that are allowed to go through the transport layer
         valueSuppliers.add(() -> null);
-        valueSuppliers.add(() -> randomInt());
-        valueSuppliers.add(() -> randomLong());
-        valueSuppliers.add(() -> randomDouble());
-        valueSuppliers.add(() -> randomFloat());
-        valueSuppliers.add(() -> randomByte());
-        valueSuppliers.add(() -> randomShort());
-        valueSuppliers.add(() -> randomBoolean());
+        valueSuppliers.add(ESTestCase::randomInt);
+        valueSuppliers.add(ESTestCase::randomLong);
+        valueSuppliers.add(ESTestCase::randomDouble);
+        valueSuppliers.add(ESTestCase::randomFloat);
+        valueSuppliers.add(ESTestCase::randomByte);
+        valueSuppliers.add(ESTestCase::randomShort);
+        valueSuppliers.add(ESTestCase::randomBoolean);
         valueSuppliers.add(() -> frequently() ? randomAlphaOfLengthBetween(1, 30) : randomRealisticUnicodeOfCodepointLength(30));
 
         int size = randomIntBetween(1, 20);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java
index 06eefb7ccba14..68434f0f29ecc 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java
@@ -48,7 +48,7 @@
 import java.util.Random;
 
 import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween;
-import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiOfLength;
+import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiLettersOfLength;
 import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomUnicodeOfLengthBetween;
 import static java.util.Collections.singleton;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE;
@@ -122,7 +122,7 @@ public static Tuple<List<Object>, List<Object>> randomStoredFieldValues(Random r
                 expectedParsedValues.add(randomBoolean);
                 break;
             case 7:
-                String randomString = random.nextBoolean() ? RandomStrings.randomAsciiOfLengthBetween(random, 3, 10 ) :
+                String randomString = random.nextBoolean() ? RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10) :
                         randomUnicodeOfLengthBetween(random, 3, 10);
                 originalValues.add(randomString);
                 expectedParsedValues.add(randomString);
@@ -191,11 +191,11 @@ private static void addFields(Random random, XContentBuilder builder, int minNum
         for (int i = 0; i < numFields; i++) {
             if (currentDepth < 5 && random.nextInt(100) >= 70) {
                 if (random.nextBoolean()) {
-                    builder.startObject(RandomStrings.randomAsciiOfLengthBetween(random, 6, 10));
+                    builder.startObject(RandomStrings.randomAsciiLettersOfLengthBetween(random, 6, 10));
                     addFields(random, builder, minNumFields, currentDepth + 1);
                     builder.endObject();
                 } else {
-                    builder.startArray(RandomStrings.randomAsciiOfLengthBetween(random, 6, 10));
+                    builder.startArray(RandomStrings.randomAsciiLettersOfLengthBetween(random, 6, 10));
                     int numElements = randomIntBetween(random, 1, 5);
                     boolean object = random.nextBoolean();
                     int dataType = -1;
@@ -214,7 +214,7 @@ private static void addFields(Random random, XContentBuilder builder, int minNum
                     builder.endArray();
                 }
             } else {
-                builder.field(RandomStrings.randomAsciiOfLengthBetween(random, 6, 10),
+                builder.field(RandomStrings.randomAsciiLettersOfLengthBetween(random, 6, 10),
                         randomFieldValue(random, randomDataType(random)));
             }
         }
@@ -227,9 +227,9 @@ private static int randomDataType(Random random) {
     private static Object randomFieldValue(Random random, int dataType) {
         switch(dataType) {
             case 0:
-                return RandomStrings.randomAsciiOfLengthBetween(random, 3, 10);
+                return RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10);
             case 1:
-                return RandomStrings.randomAsciiOfLengthBetween(random, 3, 10);
+                return RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10);
             case 2:
                 return random.nextLong();
             case 3:
@@ -287,10 +287,10 @@ public static Tuple<ShardInfo, ShardInfo> randomShardInfo(Random random, boolean
      * @param random Random generator
      */
     private static Tuple<Failure, Failure> randomShardInfoFailure(Random random) {
-        String index = randomAsciiOfLength(random, 5);
-        String indexUuid = randomAsciiOfLength(random, 5);
+        String index = randomAsciiLettersOfLength(random, 5);
+        String indexUuid = randomAsciiLettersOfLength(random, 5);
         int shardId = randomIntBetween(random, 1, 10);
-        String nodeId = randomAsciiOfLength(random, 5);
+        String nodeId = randomAsciiLettersOfLength(random, 5);
         RestStatus status = randomFrom(random, RestStatus.INTERNAL_SERVER_ERROR, RestStatus.FORBIDDEN, RestStatus.NOT_FOUND);
         boolean primary = random.nextBoolean();
         ShardId shard = new ShardId(index, indexUuid, shardId);
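Review note on the RandomObjects hunks: the `randomAsciiOfLength*` helpers only ever produced ASCII letters, and the randomized-testing library introduced the more accurately named `randomAsciiLettersOfLength*` variants in its newer releases; these hunks simply track that rename. A tiny usage sketch, assuming the `RandomStrings` generators from randomizedtesting are on the test classpath (class name and seed are illustrative):

```java
import java.util.Random;

import com.carrotsearch.randomizedtesting.generators.RandomStrings;

public class RandomStringsSketch {
    public static void main(String[] args) {
        Random random = new Random(42);

        // new-style names: "Letters" spells out that only ASCII letters are produced
        String id = RandomStrings.randomAsciiLettersOfLength(random, 5);
        String name = RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10);

        System.out.println(id + " / " + name);
    }
}
```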
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java
index 0fb3094fca5a1..38857e2170de4 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java
@@ -80,6 +80,7 @@ public void cleanDirectory() throws Exception {
         bitsetFilterCache.close();
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32457")
     public void testSearch() throws Exception {
         IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig());