Commit eb2ddea
Internal: Fix a very rare case of corruption in compression used for
internal cluster communication.

See CorruptedCompressorTests for details on how this bug can be hit.
This change also removes the ability to use the unsafe variant of
ChunkEncoder, removing support for the compress.lzf.decoder setting.
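
For reference, the removed setting accepted "optimal" or "safe" (see the deleted configure() method in the diff below); in elasticsearch.yml it would have been set along these lines:

    compress.lzf.decoder: safe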
rjernst committed Aug 11, 2014
1 parent 9d82840 commit eb2ddea
Showing 4 changed files with 75 additions and 21 deletions.
pom.xml (1 addition & 1 deletion)
@@ -253,7 +253,7 @@
     <dependency>
         <groupId>com.ning</groupId>
         <artifactId>compress-lzf</artifactId>
-        <version>0.9.6</version>
+        <version>1.0.2</version>
         <scope>compile</scope>
     </dependency>

src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java
@@ -22,6 +22,7 @@
 import com.ning.compress.BufferRecycler;
 import com.ning.compress.lzf.ChunkEncoder;
 import com.ning.compress.lzf.LZFChunk;
+import com.ning.compress.lzf.util.ChunkEncoderFactory;
 import org.elasticsearch.common.compress.CompressedStreamOutput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -39,7 +40,7 @@ public LZFCompressedStreamOutput(StreamOutput out) throws IOException {
         this.recycler = BufferRecycler.instance();
         this.uncompressed = this.recycler.allocOutputBuffer(LZFChunk.MAX_CHUNK_LEN);
         this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
-        this.encoder = new ChunkEncoder(LZFChunk.MAX_CHUNK_LEN);
+        this.encoder = ChunkEncoderFactory.safeInstance();
     }

     @Override
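
Here ChunkEncoderFactory.safeInstance() pins the stream output to the pure-Java encoder. A minimal standalone round-trip sketch (not code from this commit; it only reuses the compress-lzf calls already present in this diff):

import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
import com.ning.compress.lzf.util.ChunkEncoderFactory;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

public class SafeLzfRoundTrip {
    public static void main(String[] args) throws Exception {
        byte[] data = "hello hello hello hello".getBytes("UTF-8");

        // The pure-Java implementations this commit standardizes on; no sun.misc.Unsafe involved.
        ChunkEncoder encoder = ChunkEncoderFactory.safeInstance();
        ChunkDecoder decoder = ChunkDecoderFactory.safeInstance();

        // Compress into a single LZF chunk...
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        encoder.encodeAndWriteChunk(data, 0, data.length, out);

        // ...and decompress it again.
        byte[] buffer = new byte[LZFChunk.MAX_CHUNK_LEN];
        byte[] restored = new byte[data.length];
        decoder.decodeChunk(new ByteArrayInputStream(out.toByteArray()), buffer, restored);

        System.out.println(new String(restored, "UTF-8")); // hello hello hello hello
    }
}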
src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java
@@ -49,11 +49,7 @@ public class LZFCompressor implements Compressor {
     private ChunkDecoder decoder;

     public LZFCompressor() {
-        if (Constants.SUN_OS) {
-            this.decoder = ChunkDecoderFactory.safeInstance();
-        } else {
-            this.decoder = ChunkDecoderFactory.optimalInstance();
-        }
+        this.decoder = ChunkDecoderFactory.safeInstance();
         Loggers.getLogger(LZFCompressor.class).debug("using [{}] decoder", this.decoder.getClass().getSimpleName());
     }

@@ -63,20 +59,7 @@ public String type() {
     }

     @Override
-    public void configure(Settings settings) {
-        String decoderType = settings.get("compress.lzf.decoder", null);
-        if (decoderType != null) {
-            if ("optimal".equalsIgnoreCase(decoderType)) {
-                this.decoder = ChunkDecoderFactory.optimalInstance();
-                Loggers.getLogger(LZFCompressor.class).debug("using [{}] decoder", this.decoder.getClass().getSimpleName());
-            } else if ("safe".equalsIgnoreCase(decoderType)) {
-                this.decoder = ChunkDecoderFactory.safeInstance();
-                Loggers.getLogger(LZFCompressor.class).debug("using [{}] decoder", this.decoder.getClass().getSimpleName());
-            } else {
-                Loggers.getLogger(LZFCompressor.class).warn("decoder type not recognized [{}], still using [{}]", decoderType, this.decoder.getClass().getSimpleName());
-            }
-        }
-    }
+    public void configure(Settings settings) {}

     @Override
     public boolean isCompressed(BytesReference bytes) {
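For context on the constructor change above: ChunkDecoderFactory can return either an Unsafe-backed decoder or the pure-Java one, and this commit pins Elasticsearch to the latter. A small probe, assuming compress-lzf 1.0.2 (the printed class names are illustrative):

import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.util.ChunkDecoderFactory;

public class DecoderProbe {
    public static void main(String[] args) {
        // optimalInstance() prefers an Unsafe-backed decoder when the JVM allows it;
        // safeInstance() always returns the pure-Java implementation.
        ChunkDecoder optimal = ChunkDecoderFactory.optimalInstance();
        ChunkDecoder safe = ChunkDecoderFactory.safeInstance();
        System.out.println(optimal.getClass().getSimpleName()); // e.g. UnsafeChunkDecoder
        System.out.println(safe.getClass().getSimpleName());    // e.g. VanillaChunkDecoder
    }
}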
src/test/java/org/elasticsearch/common/compress/lzf/CorruptedCompressorTests.java
@@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.compress.lzf;

import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
import com.ning.compress.lzf.util.ChunkEncoderFactory;
import org.elasticsearch.test.ElasticsearchTestCase;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

/**
 * Test an extremely rare corruption produced by the pure-Java implementation of ChunkEncoder.
*/
public class CorruptedCompressorTests extends ElasticsearchTestCase {

    public void testCorruption() throws IOException {
        // This test generates a hash collision: [0,1,153,64] hashes the same as [1,153,64,64],
        // and then leverages the bug (s/inPos/0/) to corrupt the output. The first array is
        // used to insert a hash-table reference to offset 6. The encoder's hash table is then
        // reused for the second array, so it still believes there is a match at position 6;
        // at position 7 it finds a sequence with the same hash and emits a bogus back-reference.
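        // A hedged reading of "s/inPos/0/" (the exact upstream method names may differ): the
        // buggy pure-Java encoder hashed as if the current input position had been replaced
        // by a literal zero, roughly hash(in, 0) where hash(in, inPos) was intended, letting
        // a recycled hash table hand later chunks back-references into stale data.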
        byte[] b1 = new byte[] {0,1,2,3,4,(byte)153,64,64,64,9,9,9,9,9,9,9,9,9,9};
        byte[] b2 = new byte[] {1,(byte)153,0,0,0,0,(byte)153,64,64,64,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
        ChunkEncoder encoder = ChunkEncoderFactory.safeInstance();
        ChunkDecoder decoder = ChunkDecoderFactory.safeInstance();
        check(encoder, decoder, b1, 0, b1.length);
        final int off = 6;
        check(encoder, decoder, b2, off, b2.length - off);
    }

    private void check(ChunkEncoder encoder, ChunkDecoder decoder, byte[] bytes, int offset, int length) throws IOException {
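        // Round-trip bytes[offset, offset+length) through a single LZF chunk
        // and assert the decoded bytes match the original input slice.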
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        byte[] expected = new byte[length];
        byte[] buffer = new byte[LZFChunk.MAX_CHUNK_LEN];
        byte[] output = new byte[length];
        System.arraycopy(bytes, offset, expected, 0, length);
        encoder.encodeAndWriteChunk(bytes, offset, length, outputStream);
        System.out.println(Arrays.toString(Arrays.copyOf(outputStream.toByteArray(), 20)));
        InputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
        assertEquals(length, decoder.decodeChunk(inputStream, buffer, output));

        System.out.println(Arrays.toString(Arrays.copyOf(output, 20)));
        assertArrayEquals(expected, output);
    }
}
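
Note that the test deliberately reuses one ChunkEncoder across both check() calls: the corruption depends on stale hash-table entries surviving from the first buffer into the second, so constructing a fresh encoder per call would not reproduce it.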
