More Efficient Blobstore Metadata IO (elastic#55777)
No need to copy all these bytes multiple times, especially not when
writing a multi-MB global cluster state snapshot through this method.
original-brownbear authored Apr 27, 2020
1 parent 9a91b06 commit eac3e9a
Showing 3 changed files with 25 additions and 23 deletions.
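
The change collapses the write path from two full in-memory copies of the serialized blob down to a single buffer. As a minimal, illustrative sketch of the before/after shape (not the actual Elasticsearch code; Writer is a stand-in for the snapshot metadata serializer):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

interface Writer {
    void writeTo(OutputStream out) throws IOException;
}

class CopyVsStream {

    // Before: serialize into a temporary buffer, flatten it to an array, and
    // copy that array into a second buffer -- every byte is materialized twice.
    static byte[] buffered(Writer writer) throws IOException {
        ByteArrayOutputStream first = new ByteArrayOutputStream();
        writer.writeTo(first);                    // copy 1: the serialized form
        byte[] intermediate = first.toByteArray();
        ByteArrayOutputStream second = new ByteArrayOutputStream();
        second.write(intermediate);               // copy 2: into the final buffer
        return second.toByteArray();
    }

    // After: hand the final buffer directly to the serializer, so the bytes
    // are written exactly once.
    static byte[] streaming(Writer writer) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writer.writeTo(out);                      // single pass
        return out.toByteArray();
    }
}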
Compressor.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 
 import java.io.IOException;
+import java.io.OutputStream;
 
 public interface Compressor {
 
@@ -37,5 +38,5 @@ public interface Compressor {
      * Creates a new stream output that compresses the contents and writes to the provided stream
      * output. Closing the returned {@link StreamOutput} will close the provided stream output.
      */
-    StreamOutput streamOutput(StreamOutput out) throws IOException;
+    StreamOutput streamOutput(OutputStream out) throws IOException;
 }
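
With the parameter widened from StreamOutput to OutputStream, callers can compress into any destination without first adapting it. A hypothetical usage sketch (the compressor instance and file path are assumptions, not code from this commit):

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.io.stream.StreamOutput;

class CompressToFile {
    static void compress(Compressor compressor, String path, byte[] payload) throws IOException {
        try (OutputStream fileOut = new FileOutputStream(path);
             StreamOutput compressed = compressor.streamOutput(fileOut)) {
            compressed.writeBytes(payload, 0, payload.length);
        } // per the Javadoc above, closing the StreamOutput closes fileOut too
    }
}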
DeflateCompressor.java
@@ -107,8 +107,8 @@ public void close() throws IOException {
     }
 
     @Override
-    public StreamOutput streamOutput(StreamOutput out) throws IOException {
-        out.writeBytes(HEADER);
+    public StreamOutput streamOutput(OutputStream out) throws IOException {
+        out.write(HEADER);
         final boolean nowrap = true;
         final Deflater deflater = new Deflater(LEVEL, nowrap);
         final boolean syncFlush = true;
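
The hunk is truncated before the stream is actually wrapped, so as an assumption about what follows, here is how a raw-deflate stream with these exact flags can be assembled from plain JDK classes:

import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;

class DeflateSketch {
    static OutputStream deflating(OutputStream out, byte[] header, int level) throws IOException {
        out.write(header);                        // magic bytes go out uncompressed
        final boolean nowrap = true;              // raw deflate, no zlib envelope
        final Deflater deflater = new Deflater(level, nowrap);
        final boolean syncFlush = true;           // flush() pushes buffered data through
        final int bufferSize = 4096;              // illustrative; the real size is not shown in the hunk
        return new DeflaterOutputStream(out, deflater, bufferSize, syncFlush);
    }
}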
ChecksumBlobStoreFormat.java
@@ -22,12 +22,15 @@
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexFormatTooNewException;
 import org.apache.lucene.index.IndexFormatTooOldException;
+import org.apache.lucene.store.ByteBuffersDataInput;
+import org.apache.lucene.store.ByteBuffersIndexInput;
+import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.OutputStreamIndexOutput;
+import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.blobstore.BlobContainer;
-import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressorFactory;
 import org.elasticsearch.common.io.Streams;
@@ -46,10 +49,10 @@
 import org.elasticsearch.gateway.CorruptStateException;
 import org.elasticsearch.snapshots.SnapshotInfo;
 
-import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
@@ -127,8 +130,10 @@ public String blobName(String name) {
     public T readBlob(BlobContainer blobContainer, String blobName) throws IOException {
         final BytesReference bytes = Streams.readFully(blobContainer.readBlob(blobName));
         final String resourceDesc = "ChecksumBlobStoreFormat.readBlob(blob=\"" + blobName + "\")";
-        try (ByteArrayIndexInput indexInput =
-                 new ByteArrayIndexInput(resourceDesc, BytesReference.toBytes(bytes))) {
+        try {
+            final IndexInput indexInput = bytes.length() > 0 ? new ByteBuffersIndexInput(
+                    new ByteBuffersDataInput(Arrays.asList(BytesReference.toByteBuffers(bytes))), resourceDesc)
+                : new ByteArrayIndexInput(resourceDesc, BytesRef.EMPTY_BYTES);
             CodecUtil.checksumEntireFile(indexInput);
             CodecUtil.checkHeader(indexInput, codec, VERSION, VERSION);
             long filePointer = indexInput.getFilePointer();
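
The point of this hunk is that a non-empty blob is no longer flattened into a single byte[] via BytesReference.toBytes; its existing pages are wrapped directly. A sketch of the same Lucene pattern over an arbitrary list of buffers:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.ByteBuffersDataInput;
import org.apache.lucene.store.ByteBuffersIndexInput;
import org.apache.lucene.store.IndexInput;

class ZeroCopyRead {
    static void verifyChecksum(ByteBuffer[] pages, String resourceDesc) throws IOException {
        // Wraps the buffers as-is; nothing is copied into a fresh array.
        IndexInput in = new ByteBuffersIndexInput(
            new ByteBuffersDataInput(Arrays.asList(pages)), resourceDesc);
        CodecUtil.checksumEntireFile(in);   // reads through the pages to verify the footer checksum
    }
}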
@@ -182,19 +187,9 @@ public void write(T obj, BlobContainer blobContainer, String name, boolean failI
         });
     }
 
-    private void writeTo(final T obj, final String blobName, final CheckedConsumer<BytesArray, IOException> consumer) throws IOException {
-        final BytesReference bytes;
-        try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) {
-            if (compress) {
-                try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) {
-                    write(obj, compressedStreamOutput);
-                }
-            } else {
-                write(obj, bytesStreamOutput);
-            }
-            bytes = bytesStreamOutput.bytes();
-        }
-        try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
+    private void writeTo(final T obj, final String blobName,
+                         final CheckedConsumer<BytesReference, IOException> consumer) throws IOException {
+        try (BytesStreamOutput outputStream = new BytesStreamOutput()) {
             final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")";
             try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, outputStream, BUFFER_SIZE)) {
                 CodecUtil.writeHeader(indexOutput, codec, VERSION);
@@ -205,15 +200,21 @@ public void close() {
                         // in order to write the footer we need to prevent closing the actual index input.
                     }
                 }) {
-                    bytes.writeTo(indexOutputOutputStream);
+                    if (compress) {
+                        try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(indexOutputOutputStream)) {
+                            write(obj, compressedStreamOutput);
+                        }
+                    } else {
+                        write(obj, indexOutputOutputStream);
+                    }
                 }
                 CodecUtil.writeFooter(indexOutput);
             }
-            consumer.accept(new BytesArray(outputStream.toByteArray()));
+            consumer.accept(outputStream.bytes());
         }
     }
 
-    private void write(T obj, StreamOutput streamOutput) throws IOException {
+    private void write(T obj, OutputStream streamOutput) throws IOException {
         try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, streamOutput)) {
             builder.startObject();
             obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS);
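
The anonymous subclass in the last two hunks exists so that closing the (possibly compressing) body writer does not close the underlying IndexOutput, which still has to receive the Lucene footer. A standalone sketch of that close-suppressing wrapper (names are illustrative, not Elasticsearch helpers):

import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Lets an inner writer (e.g. a compressing stream) be closed to flush its
// tail, while the wrapped stream stays open for the footer that follows.
final class NonClosingOutputStream extends FilterOutputStream {
    NonClosingOutputStream(OutputStream out) {
        super(out);
    }

    @Override
    public void close() throws IOException {
        flush(); // flush buffered bytes, but deliberately do not close the delegate
    }
}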
