diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 44f8d0a198ff..13142036f077 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -184,10 +184,6 @@
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-compression-snappy</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-compression-xz</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-compression-zstd</artifactId>
diff --git a/hbase-compression/hbase-compression-xz/pom.xml b/hbase-compression/hbase-compression-xz/pom.xml
deleted file mode 100644
index b34cc1bf5928..000000000000
--- a/hbase-compression/hbase-compression-xz/pom.xml
+++ /dev/null
@@ -1,166 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hbase</groupId>
-    <artifactId>hbase-compression</artifactId>
-    <version>${revision}</version>
-    <relativePath>../pom.xml</relativePath>
-  </parent>
-  <artifactId>hbase-compression-xz</artifactId>
-  <name>Apache HBase - Compression - XZ</name>
-  <description>Pure Java compression support using XZ for Java</description>
-  <dependencies>
-    <!-- Intra-project dependencies -->
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-logging</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-testing-util</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-annotations</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.github.stephenc.findbugs</groupId>
-      <artifactId>findbugs-annotations</artifactId>
-      <scope>compile</scope>
-      <optional>true</optional>
-    </dependency>
-    <!-- XZ for Java -->
-    <dependency>
-      <groupId>org.tukaani</groupId>
-      <artifactId>xz</artifactId>
-    </dependency>
-    <!-- Test dependencies -->
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>jcl-over-slf4j</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>jul-to-slf4j</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-api</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-slf4j-impl</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-1.2-api</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.hamcrest</groupId>
-      <artifactId>hamcrest-library</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <pluginManagement>
-      <plugins>
-        <plugin>
-          <!--Make it so assembly:single does nothing in here-->
-          <artifactId>maven-assembly-plugin</artifactId>
-          <configuration>
-            <skipAssembly>true</skipAssembly>
-          </configuration>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-checkstyle-plugin</artifactId>
-          <configuration>
-            <failOnViolation>true</failOnViolation>
-          </configuration>
-        </plugin>
-        <plugin>
-          <groupId>net.revelc.code</groupId>
-          <artifactId>warbucks-maven-plugin</artifactId>
-        </plugin>
-      </plugins>
-    </pluginManagement>
-    <plugins>
-      <!-- Testing plugins -->
-      <plugin>
-        <artifactId>maven-surefire-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>net.revelc.code</groupId>
-        <artifactId>warbucks-maven-plugin</artifactId>
-      </plugin>
-    </plugins>
-  </build>
-  <profiles>
-    <profile>
-      <id>build-with-jdk11</id>
-      <activation>
-        <jdk>[1.11,)</jdk>
-      </activation>
-      <dependencies>
-        <dependency>
-          <groupId>javax.annotation</groupId>
-          <artifactId>javax.annotation-api</artifactId>
-        </dependency>
-      </dependencies>
-    </profile>
-  </profiles>
-</project>
diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java
deleted file mode 100644
index a5d583d770c0..000000000000
--- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.compress.xz;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.io.compress.CompressionUtil;
-import org.apache.hadoop.io.compress.BlockCompressorStream;
-import org.apache.hadoop.io.compress.BlockDecompressorStream;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.CompressionInputStream;
-import org.apache.hadoop.io.compress.CompressionOutputStream;
-import org.apache.hadoop.io.compress.Compressor;
-import org.apache.hadoop.io.compress.Decompressor;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Hadoop lzma codec implemented with XZ for Java.
- */
-@InterfaceAudience.Private
-public class LzmaCodec implements Configurable, CompressionCodec {
-
-  public static final String LZMA_LEVEL_KEY = "hbase.io.compress.lzma.level";
-  public static final int LZMA_LEVEL_DEFAULT = 6;
-  public static final String LZMA_BUFFERSIZE_KEY = "hbase.io.compress.lzma.buffersize";
-  public static final int LZMA_BUFFERSIZE_DEFAULT = 256 * 1024;
-
-  private Configuration conf;
-  private int bufferSize;
-  private int level;
-
-  public LzmaCodec() {
-    conf = new Configuration();
-    bufferSize = getBufferSize(conf);
-    level = getLevel(conf);
-  }
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-    this.bufferSize = getBufferSize(conf);
-    this.level = getLevel(conf);
-  }
-
-  @Override
-  public Compressor createCompressor() {
-    return new LzmaCompressor(level, bufferSize);
-  }
-
-  @Override
-  public Decompressor createDecompressor() {
-    return new LzmaDecompressor(bufferSize);
-  }
-
-  @Override
-  public CompressionInputStream createInputStream(InputStream in) throws IOException {
-    return createInputStream(in, createDecompressor());
-  }
-
-  @Override
-  public CompressionInputStream createInputStream(InputStream in, Decompressor d)
-    throws IOException {
-    return new BlockDecompressorStream(in, d, bufferSize);
-  }
-
-  @Override
-  public CompressionOutputStream createOutputStream(OutputStream out) throws IOException {
-    return createOutputStream(out, createCompressor());
-  }
-
-  @Override
-  public CompressionOutputStream createOutputStream(OutputStream out, Compressor c)
-    throws IOException {
-    return new BlockCompressorStream(out, c, bufferSize,
-      CompressionUtil.compressionOverhead(bufferSize));
-  }
-
-  @Override
-  public Class<? extends Compressor> getCompressorType() {
-    return LzmaCompressor.class;
-  }
-
-  @Override
-  public Class<? extends Decompressor> getDecompressorType() {
-    return LzmaDecompressor.class;
-  }
-
-  @Override
-  public String getDefaultExtension() {
-    return ".lzma";
-  }
-
-  // Package private
-
-  static int getLevel(Configuration conf) {
-    return conf.getInt(LZMA_LEVEL_KEY, LZMA_LEVEL_DEFAULT);
-  }
-
-  static int getBufferSize(Configuration conf) {
-    return conf.getInt(LZMA_BUFFERSIZE_KEY, LZMA_BUFFERSIZE_DEFAULT);
-  }
-
-}
diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java
deleted file mode 100644
index 88d0f0d0b1cf..000000000000
--- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.compress.xz;
-
-import java.io.IOException;
-import java.nio.BufferOverflowException;
-import java.nio.ByteBuffer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
-import org.apache.hadoop.hbase.io.compress.CompressionUtil;
-import org.apache.hadoop.io.compress.Compressor;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.tukaani.xz.ArrayCache;
-import org.tukaani.xz.BasicArrayCache;
-import org.tukaani.xz.LZMA2Options;
-import org.tukaani.xz.LZMAOutputStream;
-import org.tukaani.xz.UnsupportedOptionsException;
-
-/**
- * Hadoop compressor glue for XZ for Java.
- */
-@InterfaceAudience.Private
-public class LzmaCompressor implements Compressor {
-
-  protected static final ArrayCache ARRAY_CACHE = new BasicArrayCache();
-  protected ByteBuffer inBuf;
-  protected ByteBuffer outBuf;
-  protected int bufferSize;
-  protected boolean finish, finished;
-  protected long bytesRead, bytesWritten;
-  protected LZMA2Options lzOptions;
-
-  LzmaCompressor(int level, int bufferSize) {
-    this.bufferSize = bufferSize;
-    this.inBuf = ByteBuffer.allocate(bufferSize);
-    this.outBuf = ByteBuffer.allocate(bufferSize);
-    this.outBuf.position(bufferSize);
-    this.lzOptions = new LZMA2Options();
-    try {
-      this.lzOptions.setPreset(level);
-    } catch (UnsupportedOptionsException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  @Override
-  public int compress(byte[] b, int off, int len) throws IOException {
-    // If we have previously compressed our input and still have some buffered bytes
-    // remaining, provide them to the caller.
-    if (outBuf.hasRemaining()) {
-      int remaining = outBuf.remaining(), n = Math.min(remaining, len);
-      outBuf.get(b, off, n);
-      return n;
-    }
-    // We don't actually begin compression until our caller calls finish().
-    if (finish) {
-      if (inBuf.position() > 0) {
-        inBuf.flip();
-        int uncompressed = inBuf.remaining();
-        // If we don't have enough capacity in our currently allocated output buffer,
-        // allocate a new one which does.
-        int needed = maxCompressedLength(uncompressed);
-        // Can we compress directly into the provided array?
-        ByteBuffer writeBuffer;
-        boolean direct = false;
-        if (len <= needed) {
-          writeBuffer = ByteBuffer.wrap(b, off, len);
-          direct = true;
-        } else {
-          if (outBuf.capacity() < needed) {
-            needed = CompressionUtil.roundInt2(needed);
-            outBuf = ByteBuffer.allocate(needed);
-          } else {
-            outBuf.clear();
-          }
-          writeBuffer = outBuf;
-        }
-        int oldPos = writeBuffer.position();
-        // This is pretty ugly. I don't see how to do it better. Stream to byte buffers back to
-        // stream back to byte buffers... if only XZ for Java had a public block compression
-        // API. It does not. Fortunately the algorithm is so slow, especially at higher levels,
-        // that inefficiencies here may not matter.
-        try (ByteBufferOutputStream lowerOut = new ByteBufferOutputStream(writeBuffer) {
-          @Override
-          // ByteBufferOutputStream will reallocate the output buffer if it is too small. We
-          // do not want that behavior here.
-          protected void checkSizeAndGrow(int extra) {
-            long capacityNeeded = curBuf.position() + (long) extra;
-            if (capacityNeeded > curBuf.limit()) {
-              throw new BufferOverflowException();
-            }
-          }
-        }) {
-          try (LZMAOutputStream out =
-            new LZMAOutputStream(lowerOut, lzOptions, uncompressed, ARRAY_CACHE)) {
-            out.write(inBuf.array(), inBuf.arrayOffset(), uncompressed);
-          }
-        }
-        int written = writeBuffer.position() - oldPos;
-        bytesWritten += written;
-        inBuf.clear();
-        finished = true;
-        outBuf.flip();
-        if (!direct) {
-          int n = Math.min(written, len);
-          outBuf.get(b, off, n);
-          return n;
-        } else {
-          return written;
-        }
-      } else {
-        finished = true;
-      }
-    }
-    return 0;
-  }
-
-  @Override
-  public void end() {
-  }
-
-  @Override
-  public void finish() {
-    finish = true;
-  }
-
-  @Override
-  public boolean finished() {
-    return finished && !outBuf.hasRemaining();
-  }
-
-  @Override
-  public long getBytesRead() {
-    return bytesRead;
-  }
-
-  @Override
-  public long getBytesWritten() {
-    return bytesWritten;
-  }
-
-  @Override
-  public boolean needsInput() {
-    return !finished();
-  }
-
-  @Override
-  public void reinit(Configuration conf) {
-    if (conf != null) {
-      // Level might have changed
-      try {
-        int level = LzmaCodec.getLevel(conf);
-        this.lzOptions = new LZMA2Options();
-        this.lzOptions.setPreset(level);
-      } catch (UnsupportedOptionsException e) {
-        throw new RuntimeException(e);
-      }
-      // Buffer size might have changed
-      int newBufferSize = LzmaCodec.getBufferSize(conf);
-      if (bufferSize != newBufferSize) {
-        bufferSize = newBufferSize;
-        this.inBuf = ByteBuffer.allocate(bufferSize);
-        this.outBuf = ByteBuffer.allocate(bufferSize);
-      }
-    }
-    reset();
-  }
-
-  @Override
-  public void reset() {
-    inBuf.clear();
-    outBuf.clear();
-    outBuf.position(outBuf.capacity());
-    bytesRead = 0;
-    bytesWritten = 0;
-    finish = false;
-    finished = false;
-  }
-
-  @Override
-  public void setDictionary(byte[] b, int off, int len) {
-    throw new UnsupportedOperationException("setDictionary is not supported");
-  }
-
-  @Override
-  public void setInput(byte[] b, int off, int len) {
-    if (inBuf.remaining() < len) {
-      // Get a new buffer that can accommodate the accumulated input plus the additional
-      // input that would cause a buffer overflow without reallocation.
-      // Fortunately this condition should be rare, because reallocation is expensive.
-      int needed = CompressionUtil.roundInt2(inBuf.capacity() + len);
-      ByteBuffer newBuf = ByteBuffer.allocate(needed);
-      inBuf.flip();
-      newBuf.put(inBuf);
-      inBuf = newBuf;
-    }
-    inBuf.put(b, off, len);
-    bytesRead += len;
-    finished = false;
-  }
-
-  // Package private
-
-  int maxCompressedLength(int len) {
-    return len + CompressionUtil.compressionOverhead(len);
-  }
-
-}
diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java
deleted file mode 100644
index b1d065485b5d..000000000000
--- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.compress.xz;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import org.apache.hadoop.hbase.io.ByteBufferInputStream;
-import org.apache.hadoop.hbase.io.compress.CompressionUtil;
-import org.apache.hadoop.io.compress.Decompressor;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.tukaani.xz.ArrayCache;
-import org.tukaani.xz.BasicArrayCache;
-import org.tukaani.xz.LZMAInputStream;
-
-/**
- * Hadoop decompressor glue for XZ for Java.
- */
-@InterfaceAudience.Private
-public class LzmaDecompressor implements Decompressor {
-
-  protected static final ArrayCache ARRAY_CACHE = new BasicArrayCache() {
-    @Override
-    public byte[] getByteArray(int size, boolean fillWithZeros) {
-      // Work around a bug in XZ decompression, which misbehaves if cached byte arrays
-      // are not cleared, by always clearing them.
-      return super.getByteArray(size, true);
-    }
-  };
-  protected ByteBuffer inBuf, outBuf;
-  protected int inLen;
-  protected boolean finished;
-
-  LzmaDecompressor(int bufferSize) {
-    this.inBuf = ByteBuffer.allocate(bufferSize);
-    this.outBuf = ByteBuffer.allocate(bufferSize);
-    this.outBuf.position(bufferSize);
-  }
-
-  @Override
-  public int decompress(byte[] b, int off, int len) throws IOException {
-    if (outBuf.hasRemaining()) {
-      int remaining = outBuf.remaining(), n = Math.min(remaining, len);
-      outBuf.get(b, off, n);
-      return n;
-    }
-    if (inBuf.position() > 0) {
-      inBuf.flip();
-      int remaining = inBuf.remaining();
-      inLen -= remaining;
-      // This is pretty ugly. I don't see how to do it better. Stream to byte buffers back to
-      // stream back to byte buffers... if only XZ for Java had a public block compression API.
-      // It does not. LZMA decompression speed is reasonably good, so inefficiency here is a
-      // shame.
-      // Perhaps we could look at using reflection to make the package-protected classes for
-      // block compression in XZ for Java accessible here; that library can be expected to
-      // rarely change, if at all.
-      outBuf.clear();
-      try (ByteBufferInputStream lowerIn = new ByteBufferInputStream(inBuf)) {
-        final byte[] buf = new byte[8192];
-        try (LZMAInputStream in = new LZMAInputStream(lowerIn, ARRAY_CACHE)) {
-          int read;
-          do {
-            read = in.read(buf);
-            if (read > 0) {
-              outBuf.put(buf, 0, read);
-            }
-          } while (read > 0);
-        }
-      }
-      int written = outBuf.position();
-      outBuf.flip();
-      inBuf.clear();
-      int n = Math.min(written, len);
-      outBuf.get(b, off, n);
-      return n;
-    }
-    finished = true;
-    return 0;
-  }
-
-  @Override
-  public void end() {
-  }
-
-  @Override
-  public boolean finished() {
-    return finished;
-  }
-
-  @Override
-  public int getRemaining() {
-    return inLen;
-  }
-
-  @Override
-  public boolean needsDictionary() {
-    return false;
-  }
-
-  @Override
-  public void reset() {
-    inBuf.clear();
-    inLen = 0;
-    outBuf.clear();
-    outBuf.position(outBuf.capacity());
-    finished = false;
-  }
-
-  @Override
-  public boolean needsInput() {
-    return inBuf.position() == 0;
-  }
-
-  @Override
-  public void setDictionary(byte[] b, int off, int len) {
-    throw new UnsupportedOperationException("setDictionary is not supported");
-  }
-
-  @Override
-  public void setInput(byte[] b, int off, int len) {
-    if (inBuf.remaining() < len) {
-      // Get a new buffer that can accommodate the accumulated input plus the additional
-      // input that would cause a buffer overflow without reallocation.
-      // Fortunately this condition should be rare, because reallocation is expensive.
-      int needed = CompressionUtil.roundInt2(inBuf.capacity() + len);
-      ByteBuffer newBuf = ByteBuffer.allocate(needed);
-      inBuf.flip();
-      newBuf.put(inBuf);
-      inBuf = newBuf;
-    }
-    inBuf.put(b, off, len);
-    inLen += len;
-    finished = false;
-  }
-
-}
diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java
deleted file mode 100644
index 734740635084..000000000000
--- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.compress.xz;
-
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestingUtil;
-import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.compress.HFileTestBase;
-import org.apache.hadoop.hbase.testclassification.IOTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({ IOTests.class, SmallTests.class })
-public class TestHFileCompressionLzma extends HFileTestBase {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestHFileCompressionLzma.class);
-
-  private static Configuration conf;
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    conf = TEST_UTIL.getConfiguration();
-    conf.set(Compression.LZMA_CODEC_CLASS_KEY, LzmaCodec.class.getCanonicalName());
-    Compression.Algorithm.LZMA.reload(conf);
-    HFileTestBase.setUpBeforeClass();
-  }
-
-  @Test
-  public void test() throws Exception {
-    Path path =
-      new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile");
-    doTest(conf, path, Compression.Algorithm.LZMA);
-  }
-
-  @Test
-  public void testReconfLevels() throws Exception {
-    Path path_1 = new Path(TEST_UTIL.getDataTestDir(),
-      HBaseTestingUtil.getRandomUUID().toString() + ".1.hfile");
-    Path path_2 = new Path(TEST_UTIL.getDataTestDir(),
-      HBaseTestingUtil.getRandomUUID().toString() + ".2.hfile");
-    conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 1);
-    doTest(conf, path_1, Compression.Algorithm.LZMA);
-    long len_1 = FS.getFileStatus(path_1).getLen();
-    conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 9);
-    doTest(conf, path_2, Compression.Algorithm.LZMA);
-    long len_2 = FS.getFileStatus(path_2).getLen();
-    LOG.info("Level 1 len {}", len_1);
-    LOG.info("Level 9 len {}", len_2);
- assertTrue("Reconfiguraton with LZMA_LEVEL_KEY did not seem to work", len_1 > len_2);
- }
-
-}
diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java
deleted file mode 100644
index e5320da16777..000000000000
--- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.compress.xz;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.io.compress.CompressionTestBase;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestLzmaCodec extends CompressionTestBase {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestLzmaCodec.class);
-
-  @Test
-  public void testLzmaCodecSmall() throws Exception {
-    codecSmallTest(new LzmaCodec());
-  }
-
-  @Test
-  public void testLzmaCodecLarge() throws Exception {
-    codecLargeTest(new LzmaCodec(), 1.1); // poor compressibility
-    codecLargeTest(new LzmaCodec(), 2);
-    codecLargeTest(new LzmaCodec(), 10); // very high compressibility
-  }
-
-  @Test
-  public void testLzmaCodecVeryLarge() throws Exception {
-    Configuration conf = new Configuration();
-    // LZMA levels range from 1 to 9.
-    // Level 9 might take several minutes to complete. 6 is our default. 1 will be fast.
-    conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 1);
-    LzmaCodec codec = new LzmaCodec();
-    codec.setConf(conf);
-    codecVeryLargeTest(codec, 3); // like text
-  }
-
-}
diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java
deleted file mode 100644
index aa74926cb819..000000000000
--- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.compress.xz;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.wal.CompressedWALTestBase;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
-
-@Category({ RegionServerTests.class, MediumTests.class })
-public class TestWALCompressionLzma extends CompressedWALTestBase {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestWALCompressionLzma.class);
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    conf.set(Compression.LZMA_CODEC_CLASS_KEY, LzmaCodec.class.getCanonicalName());
-    Compression.Algorithm.LZMA.reload(conf);
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-    conf.setBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, true);
-    conf.set(CompressionContext.WAL_VALUE_COMPRESSION_TYPE, Compression.Algorithm.LZMA.getName());
-    TEST_UTIL.startMiniDFSCluster(3);
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-}
diff --git a/hbase-compression/pom.xml b/hbase-compression/pom.xml
index 4b99b3dd34b9..c2e4633b3987 100644
--- a/hbase-compression/pom.xml
+++ b/hbase-compression/pom.xml
@@ -36,7 +36,6 @@
     <module>hbase-compression-brotli</module>
     <module>hbase-compression-lz4</module>
     <module>hbase-compression-snappy</module>
-    <module>hbase-compression-xz</module>
     <module>hbase-compression-zstd</module>
   </modules>
diff --git a/pom.xml b/pom.xml
index 6cecebd50af9..fc76a7ff0f56 100644
--- a/pom.xml
+++ b/pom.xml
@@ -917,7 +917,6 @@
     <brotli4j.version>1.11.0</brotli4j.version>
     <lz4.version>1.8.0</lz4.version>
     <snappy.version>1.1.10.4</snappy.version>
-    <xz.version>1.9</xz.version>
     <zstd-jni.version>1.5.5-2</zstd-jni.version>