Merge branch '6.x' into ccr-6.x
* 6.x:
  Fixed byte buffer leak in Netty4 request handler
  Avoid uid creation in ParsedDocument (#27241)
  Upgrade to Lucene 7.1 (#27225)
  Add additional explanations around discovery.zen.ping_timeout (#27231)
  Fix compile error
  Remove unused searcher parameter in SearchService#createContext (#27227)
  Fix sequence number assertions in BWC tests
  Move IndexShard#getWritingBytes() under InternalEngine (#27209)
  Adjust bwc version for exists query tests
martijnvg committed Nov 3, 2017 · 2 parents 0ab744e + 7cb202b · commit 1166c19
Showing 61 changed files with 107 additions and 136 deletions.
2 changes: 1 addition & 1 deletion buildSrc/version.properties
@@ -1,6 +1,6 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.1.0
-lucene = 7.1.0-snapshot-f33ed4ba12a
+lucene = 7.1.0

# optional dependencies
spatial4j = 0.6

1 change: 0 additions & 1 deletion core/licenses/lucene-analyzers-common-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-analyzers-common-7.1.0.jar.sha1
@@ -0,0 +1 @@
+a508bf6b580471ee568dab7d2acfedfa5aadce70

1 change: 0 additions & 1 deletion core/licenses/lucene-backward-codecs-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-backward-codecs-7.1.0.jar.sha1
@@ -0,0 +1 @@
+804a7ce82bba3d085733486bfde4846ecb77ce01

1 change: 0 additions & 1 deletion core/licenses/lucene-core-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-core-7.1.0.jar.sha1
@@ -0,0 +1 @@
+dd291b7ebf4845483895724d2562214dc7f40049

1 change: 0 additions & 1 deletion core/licenses/lucene-grouping-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-grouping-7.1.0.jar.sha1
@@ -0,0 +1 @@
+0732d16c16421fca058a2a07ca4081ec7696365b

1 change: 0 additions & 1 deletion core/licenses/lucene-highlighter-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-highlighter-7.1.0.jar.sha1
@@ -0,0 +1 @@
+596550daabae765ad685112e0fe7c4f0fdfccb3f

1 change: 0 additions & 1 deletion core/licenses/lucene-join-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-join-7.1.0.jar.sha1
@@ -0,0 +1 @@
+5f26dd64c195258a81175772ef7fe105e7d60a26

1 change: 0 additions & 1 deletion core/licenses/lucene-memory-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-memory-7.1.0.jar.sha1
@@ -0,0 +1 @@
+3ef64c58d0c09ca40d848efa96b585b7476271f2

1 change: 0 additions & 1 deletion core/licenses/lucene-misc-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-misc-7.1.0.jar.sha1
@@ -0,0 +1 @@
+1496ee5fa62206ee5ddf51042a340d6a9ee3b5de

1 change: 0 additions & 1 deletion core/licenses/lucene-queries-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-queries-7.1.0.jar.sha1
@@ -0,0 +1 @@
+1554920ab207a3245fa408d022a5c90ad3a1fea3

1 change: 0 additions & 1 deletion core/licenses/lucene-queryparser-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-queryparser-7.1.0.jar.sha1
@@ -0,0 +1 @@
+5767c15c5ee97926829fd8a4337e434fa95f3c08

1 change: 0 additions & 1 deletion core/licenses/lucene-sandbox-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-sandbox-7.1.0.jar.sha1
@@ -0,0 +1 @@
+691f7b9ac05f3ad2ac7e80733ef70247904bd3ae

1 change: 0 additions & 1 deletion core/licenses/lucene-spatial-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-spatial-7.1.0.jar.sha1
@@ -0,0 +1 @@
+6c64c04d802badb800516a8a574cb993929c3805

1 change: 0 additions & 1 deletion core/licenses/lucene-spatial-extras-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-spatial-extras-7.1.0.jar.sha1
@@ -0,0 +1 @@
+3f1bc1aada8f06b176b782da24b9d7ad9641c41a

1 change: 0 additions & 1 deletion core/licenses/lucene-spatial3d-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-spatial3d-7.1.0.jar.sha1
@@ -0,0 +1 @@
+8ded650aed23efb775f17be496e3e3870214e23b

1 change: 0 additions & 1 deletion core/licenses/lucene-suggest-7.1.0-snapshot-f33ed4ba12a.jar.sha1
This file was deleted.

1 change: 1 addition & 0 deletions core/licenses/lucene-suggest-7.1.0.jar.sha1
@@ -0,0 +1 @@
+8d0ed1589ebdccf34e888c6efc0134a13a238c85
core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
@@ -155,7 +155,7 @@ protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest re
String error = null;
ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(request.shardId(), request.types(),
request.nowInMillis(), request.filteringAliases());
-SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
+SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT);
try {
ParsedQuery parsedQuery = searchContext.getQueryShardContext().toQuery(request.query());
searchContext.parsedQuery(parsedQuery);
core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java
@@ -90,7 +90,7 @@ protected void resolveRequest(ClusterState state, InternalRequest request) {
protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException {
ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
-SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
+SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT);
Engine.GetResult result = null;
try {
Term uidTerm = context.mapperService().createUidTerm(request.type(), request.id());
core/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -187,6 +187,9 @@ public MergeStats getMergeStats() {
/** returns the history uuid for the engine */
public abstract String getHistoryUUID();

+/** Returns how many bytes we are currently moving from heap to disk */
+public abstract long getWritingBytes();

/**
* A throttling class that can be activated, causing the
* {@code acquireThrottle} method to block on a lock when throttling
@@ -707,7 +710,7 @@ protected void writerSegmentStats(SegmentsStats stats) {
}

/** How much heap is used that would be freed by a refresh. Note that this may throw {@link AlreadyClosedException}. */
public abstract long getIndexBufferRAMBytesUsed();

protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
ensureOpen();
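
The new abstract getWritingBytes() moves write-activity accounting from the shard into the engine. A minimal sketch of how a poller in the spirit of IndexingMemoryController might consume it, assuming a simple fixed byte budget; ThrottlingDecider and writeBudgetBytes are hypothetical, only Engine#getWritingBytes() comes from this commit:

import java.util.List;

// Hypothetical sketch: sum the bytes each engine is currently moving to disk
// and throttle when the total exceeds a budget. Engine#getWritingBytes() is
// the accessor added in this commit; everything else here is illustrative.
final class ThrottlingDecider {
    private final long writeBudgetBytes;

    ThrottlingDecider(long writeBudgetBytes) {
        this.writeBudgetBytes = writeBudgetBytes;
    }

    boolean shouldThrottle(List<Engine> engines) {
        long totalWritingBytes = 0;
        for (Engine engine : engines) {
            totalWritingBytes += engine.getWritingBytes();
        }
        return totalWritingBytes > writeBudgetBytes;
    }
}
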
core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -140,6 +140,12 @@ public class InternalEngine extends Engine {
private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1);
private final CounterMetric numVersionLookups = new CounterMetric();
private final CounterMetric numIndexVersionsLookups = new CounterMetric();
+/**
+ * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this
+ * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents
+ * being indexed/deleted.
+ */
+private final AtomicLong writingBytes = new AtomicLong();

@Nullable
private final String historyUUID;
@@ -422,6 +428,12 @@ public String getHistoryUUID() {
return historyUUID;
}

+/** Returns how many bytes we are currently moving from indexing buffer to segments on disk */
+@Override
+public long getWritingBytes() {
+    return writingBytes.get();
+}

/**
* Reads the current stored translog ID from the IW commit data. If the id is not found, recommits the current
* translog id into lucene and returns null.
@@ -1230,21 +1242,26 @@ public void refresh(String source) throws EngineException {
}

final void refresh(String source, SearcherScope scope) throws EngineException {
+long bytes = 0;
// we obtain a read lock here, since we don't want a flush to happen while we are refreshing
// since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
try (ReleasableLock lock = readLock.acquire()) {
ensureOpen();
+bytes = indexWriter.ramBytesUsed();
switch (scope) {
case EXTERNAL:
// even though we maintain 2 managers we really do the heavy-lifting only once.
// the second refresh will only do the extra work we have to do for warming caches etc.
+writingBytes.addAndGet(bytes);
externalSearcherManager.maybeRefreshBlocking();
// the break here is intentional we never refresh both internal / external together
break;
case INTERNAL:
+final long versionMapBytes = versionMap.ramBytesUsedForRefresh();
+bytes += versionMapBytes;
+writingBytes.addAndGet(bytes);
internalSearcherManager.maybeRefreshBlocking();
break;

default:
throw new IllegalArgumentException("unknown scope: " + scope);
}
@@ -1258,6 +1275,8 @@ final void refresh(String source, SearcherScope scope) throws EngineException {
e.addSuppressed(inner);
}
throw new RefreshFailedEngineException(shardId, e);
+} finally {
+writingBytes.addAndGet(-bytes);
}

// TODO: maybe we should just put a scheduled job in threadPool?
@@ -1271,24 +1290,7 @@ final void refresh(String source, SearcherScope scope) throws EngineException {
public void writeIndexingBuffer() throws EngineException {
-// we obtain a read lock here, since we don't want a flush to happen while we are writing
-// since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
-try (ReleasableLock lock = readLock.acquire()) {
-ensureOpen();
-final long versionMapBytes = versionMap.ramBytesUsedForRefresh();
-final long indexingBufferBytes = indexWriter.ramBytesUsed();
-logger.debug("use refresh to write indexing buffer (heap size=[{}]), to also clear version map (heap size=[{}])",
-new ByteSizeValue(indexingBufferBytes), new ByteSizeValue(versionMapBytes));
-refresh("write indexing buffer", SearcherScope.INTERNAL);
-} catch (AlreadyClosedException e) {
-failOnTragicEvent(e);
-throw e;
-} catch (Exception e) {
-try {
-failEngine("writeIndexingBuffer failed", e);
-} catch (Exception inner) {
-e.addSuppressed(inner);
-}
-throw new RefreshFailedEngineException(shardId, e);
-}
+refresh("write indexing buffer", SearcherScope.INTERNAL);
}

@Override
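
Two pieces of this file's diff work together: refresh() now charges indexWriter.ramBytesUsed() (plus the version map size, for internal refreshes) to writingBytes before the blocking refresh and always un-charges it in a finally block, and writeIndexingBuffer() shrinks to a bare refresh() call because both the byte accounting and the failure handling now live in refresh(). A condensed sketch of that accounting pattern with the engine machinery stubbed out; WriteTracker and doBlockingRefresh() are illustrative stand-ins, not Elasticsearch APIs:

import java.util.concurrent.atomic.AtomicLong;

// Condensed sketch of the accounting pattern in the new InternalEngine#refresh:
// charge the bytes before the blocking refresh, and always un-charge them in a
// finally block so a failed refresh cannot leak counted bytes.
final class WriteTracker {
    private final AtomicLong writingBytes = new AtomicLong();

    long getWritingBytes() {
        return writingBytes.get();
    }

    void refresh(long bytesBeingWritten) {
        writingBytes.addAndGet(bytesBeingWritten);
        try {
            doBlockingRefresh();
        } finally {
            // runs on success and failure alike, so the counter cannot drift upward
            writingBytes.addAndGet(-bytesBeingWritten);
        }
    }

    private void doBlockingRefresh() {
        // stand-in for SearcherManager#maybeRefreshBlocking()
    }
}
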
core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
@@ -20,7 +20,6 @@
package org.elasticsearch.index.mapper;

import org.apache.lucene.document.Field;
-import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.ParseContext.Document;
@@ -35,7 +34,6 @@ public class ParsedDocument {
private final Field version;

private final String id, type;
-private final BytesRef uid;
private final SeqNoFieldMapper.SequenceIDFields seqID;

private final String routing;
@@ -62,7 +60,6 @@ public ParsedDocument(Field version,
this.seqID = seqID;
this.id = id;
this.type = type;
-this.uid = Uid.createUidAsBytes(type, id);
this.routing = routing;
this.documents = documents;
this.source = source;
@@ -140,9 +137,7 @@ public void addDynamicMappingsUpdate(Mapping update) {

@Override
public String toString() {
-StringBuilder sb = new StringBuilder();
-sb.append("Document ").append("uid[").append(uid).append("] doc [").append(documents).append("]");
-return sb.toString();
+return "Document uid[" + Uid.createUidAsBytes(type, id) + "] doc [" + documents + ']';
}

}
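
The uid removal is the point of #27241: the uid bytes were built eagerly in the constructor for every parsed document, yet toString() was their only remaining reader. Building them on demand removes an allocation from the hot indexing path, since toString() only runs for logging and debugging. A stripped-down sketch of the pattern, assuming a plain string uid; LazyUidExample and the "type#id" format are hypothetical, the real code calls Uid.createUidAsBytes:

// Hypothetical illustration: derive a rarely-needed value on demand in
// toString() instead of precomputing it per instance in the constructor.
final class LazyUidExample {
    private final String type;
    private final String id;

    LazyUidExample(String type, String id) {
        this.type = type;
        this.id = id;
    }

    @Override
    public String toString() {
        // built only when someone actually prints the document
        return "Document uid[" + type + "#" + id + "]";
    }
}
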
58 changes: 9 additions & 49 deletions core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -182,12 +182,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
private final QueryCachingPolicy cachingPolicy;
private final Supplier<Sort> indexSortSupplier;

-/**
- * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this
- * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents
- * being indexed/deleted.
- */
-private final AtomicLong writingBytes = new AtomicLong();
private final SearchOperationListener searchOperationListener;

protected volatile ShardRouting shardRouting;
@@ -323,12 +317,6 @@ public Store store() {
public Sort getIndexSort() {
return indexSortSupplier.get();
}
-/**
- * returns true if this shard supports indexing (i.e., write) operations.
- */
-public boolean canIndex() {
-return true;
-}

public ShardGetService getService() {
return this.getService;
@@ -839,34 +827,21 @@ public Engine.GetResult get(Engine.Get get) {
*/
public void refresh(String source) {
verifyNotClosed();

-if (canIndex()) {
-long bytes = getEngine().getIndexBufferRAMBytesUsed();
-writingBytes.addAndGet(bytes);
-try {
-if (logger.isTraceEnabled()) {
-logger.trace("refresh with source [{}] indexBufferRAMBytesUsed [{}]", source, new ByteSizeValue(bytes));
-}
-getEngine().refresh(source);
-} finally {
-if (logger.isTraceEnabled()) {
-logger.trace("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
-}
-writingBytes.addAndGet(-bytes);
-}
-} else {
-if (logger.isTraceEnabled()) {
-logger.trace("refresh with source [{}]", source);
-}
-getEngine().refresh(source);
+if (logger.isTraceEnabled()) {
+logger.trace("refresh with source [{}]", source);
+}
+getEngine().refresh(source);
}

/**
* Returns how many bytes we are currently moving from heap to disk
*/
public long getWritingBytes() {
-return writingBytes.get();
+Engine engine = getEngineOrNull();
+if (engine == null) {
+return 0;
+}
+return engine.getWritingBytes();
}

public RefreshStats refreshStats() {
@@ -1677,24 +1652,9 @@ private void handleRefreshException(Exception e) {
* Called when our shard is using too much heap and should move buffered indexed/deleted documents to disk.
*/
public void writeIndexingBuffer() {
-if (canIndex() == false) {
-throw new UnsupportedOperationException();
-}
try {
Engine engine = getEngine();
-long bytes = engine.getIndexBufferRAMBytesUsed();
-
-// NOTE: this can be an overestimate by up to 20%, if engine uses IW.flush not refresh, because version map
-// memory is low enough, but this is fine because after the writes finish, IMC will poll again and see that
-// there's still up to the 20% being used and continue writing if necessary:
-logger.debug("add [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
-writingBytes.addAndGet(bytes);
-try {
-engine.writeIndexingBuffer();
-} finally {
-writingBytes.addAndGet(-bytes);
-logger.debug("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
-}
+engine.writeIndexingBuffer();
} catch (Exception e) {
handleRefreshException(e);
}
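
A consequence of the getEngineOrNull() guard above: callers can poll every shard unconditionally, and a shard whose engine has not started yet reports 0 instead of throwing. A hypothetical aggregation in that style; WritingBytesStats is illustrative, only IndexShard#getWritingBytes() comes from this commit:

import java.util.List;

// Hypothetical aggregation over shards: shards without a started engine
// contribute 0 instead of throwing, thanks to the getEngineOrNull() guard.
final class WritingBytesStats {
    static long totalWritingBytes(List<IndexShard> shards) {
        long total = 0;
        for (IndexShard shard : shards) {
            total += shard.getWritingBytes();
        }
        return total;
    }
}
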
core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java
@@ -152,8 +152,7 @@ ByteSizeValue indexingBufferSize() {
protected List<IndexShard> availableShards() {
List<IndexShard> availableShards = new ArrayList<>();
for (IndexShard shard : indexShards) {
-// shadow replica doesn't have an indexing buffer
-if (shard.canIndex() && CAN_WRITE_INDEX_BUFFER_STATES.contains(shard.state())) {
+if (CAN_WRITE_INDEX_BUFFER_STATES.contains(shard.state())) {
availableShards.add(shard);
}
}
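
Since the removed canIndex() was hard-wired to return true, the conjunction was dead code and shard eligibility reduces to a pure lifecycle-state check. A self-contained sketch of that filter shape, assuming hypothetical stand-ins for IndexShardState and IndexShard; the state names and the contents of the set are illustrative:

import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;

// Illustrative stand-in for the state-only filter in availableShards():
// a shard qualifies purely by lifecycle state, with no per-shard capability flag.
final class BufferWriteFilter {
    enum ShardState { CREATED, RECOVERING, POST_RECOVERY, STARTED, RELOCATED, CLOSED }

    static final class Shard {
        private final ShardState state;
        Shard(ShardState state) { this.state = state; }
        ShardState state() { return state; }
    }

    // stand-in for CAN_WRITE_INDEX_BUFFER_STATES in IndexingMemoryController
    private static final EnumSet<ShardState> CAN_WRITE_STATES =
            EnumSet.of(ShardState.RECOVERING, ShardState.POST_RECOVERY, ShardState.STARTED, ShardState.RELOCATED);

    static List<Shard> availableShards(List<Shard> shards) {
        List<Shard> available = new ArrayList<>();
        for (Shard shard : shards) {
            if (CAN_WRITE_STATES.contains(shard.state())) {
                available.add(shard);
            }
        }
        return available;
    }
}
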