From 4b71eb0a8bc43cd582b110852ad8a91e872b47ea Mon Sep 17 00:00:00 2001
From: Duo Zhang <zhangduo@apache.org>
Date: Sun, 16 Jan 2022 23:21:25 +0800
Subject: [PATCH] HBASE-26675 Data race on Compactor.writer

---
 .../regionserver/compactions/Compactor.java   | 20 +++++++++++---------
 .../compactions/DefaultCompactor.java         | 12 +++++-------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 0ee7d349e4c5..93f0555b7f4d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -96,7 +96,10 @@ public abstract class Compactor<T extends CellSink> {
   private final boolean dropCacheMajor;
   private final boolean dropCacheMinor;
 
-  protected T writer = null;
+  // In compaction process only a single thread will access and write to this field, and
+  // getCompactionTargets is the only place we will access it other than the compaction thread, so
+  // make it volatile.
+  protected volatile T writer = null;
 
   //TODO: depending on Store is not good but, realistically, all compactors currently do.
   Compactor(Configuration conf, HStore store) {
@@ -547,17 +550,16 @@ protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
       dropDeletesFromRow, dropDeletesToRow);
   }
 
-  public List<Path> getCompactionTargets(){
-    if (writer == null){
+  public List<Path> getCompactionTargets() {
+    T writer = this.writer;
+    if (writer == null) {
       return Collections.emptyList();
     }
-    synchronized (writer){
-      if (writer instanceof StoreFileWriter){
-        return Arrays.asList(((StoreFileWriter)writer).getPath());
-      }
-      return ((AbstractMultiFileWriter)writer).writers().stream().map(sfw -> sfw.getPath()).collect(
-        Collectors.toList());
+    if (writer instanceof StoreFileWriter) {
+      return Arrays.asList(((StoreFileWriter) writer).getPath());
     }
+    return ((AbstractMultiFileWriter) writer).writers().stream().map(sfw -> sfw.getPath())
+      .collect(Collectors.toList());
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java
index ad2384a97ab8..03e3a1b5f394 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java
@@ -74,24 +74,22 @@ protected List<Path> commitWriter(FileDetails fd,
   @Override
   protected void abortWriter() throws IOException {
     abortWriter(writer);
+    // this step signals that the target file is no longer written and can be cleaned up
+    writer = null;
   }
 
-  protected void abortWriter(StoreFileWriter writer) throws IOException {
+  protected final void abortWriter(StoreFileWriter writer) throws IOException {
     Path leftoverFile = writer.getPath();
     try {
       writer.close();
     } catch (IOException e) {
       LOG.warn("Failed to close the writer after an unfinished compaction.", e);
-    } finally {
-      //this step signals that the target file is no longer writen and can be cleaned up
-      writer = null;
     }
     try {
       store.getFileSystem().delete(leftoverFile, false);
     } catch (IOException e) {
-      LOG.warn(
-        "Failed to delete the leftover file " + leftoverFile + " after an unfinished compaction.",
-        e);
+      LOG.warn("Failed to delete the leftover file {} after an unfinished compaction.",
+        leftoverFile, e);
     }
   }
 }