From ab0039974dc9d92935e1e6f80a288f03287815fe Mon Sep 17 00:00:00 2001
From: Duo Zhang
Date: Mon, 3 Feb 2020 14:15:57 +0800
Subject: [PATCH] HBASE-23782 We still reference the hard coded meta descriptor
 in some places when listing table descriptors (#1115)

Signed-off-by: Viraj Jasani
---
 .../hadoop/hbase/util/FSTableDescriptors.java | 78 ++++++++-----------
 .../hadoop/hbase/TestHBaseMetaEdit.java       | 31 ++++++--
 2 files changed, 55 insertions(+), 54 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 1804381c48e3..031d6090c04d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import edu.umd.cs.findbugs.annotations.Nullable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Comparator;
@@ -27,8 +28,6 @@
 import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-
-import edu.umd.cs.findbugs.annotations.Nullable;
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -37,23 +36,24 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableInfoMissingException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.HConstants;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
-import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableInfoMissingException;
-import org.apache.hadoop.hbase.TableName;
 
 /**
  * Implementation of {@link TableDescriptors} that reads descriptors from the
@@ -99,11 +99,6 @@ public class FSTableDescriptors implements TableDescriptors {
   // TODO.
   private final Map<TableName, TableDescriptor> cache = new ConcurrentHashMap<>();
 
-  /**
-   * Table descriptor for hbase:meta catalog table
-   */
-  private final TableDescriptor metaTableDescriptor;
-
   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
    * conf and the filesystem where that root dir lives.
@@ -135,33 +130,30 @@ public FSTableDescriptors(final Configuration conf, final FileSystem fs,
    * see HMaster#finishActiveMasterInitialization
    * TODO: This is a workaround. Should remove this ugly code...
    */
-  public FSTableDescriptors(final Configuration conf, final FileSystem fs,
-      final Path rootdir, final boolean fsreadonly, final boolean usecache,
-      Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
+  public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir,
+    final boolean fsreadonly, final boolean usecache,
+    Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
     this.fs = fs;
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-    TableDescriptor td = null;
-    try {
-      td = getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
-    } catch (TableInfoMissingException e) {
-      td = metaObserver == null? createMetaTableDescriptor(conf):
-        metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
-      if (!fsreadonly) {
+    if (!fsreadonly) {
+      // see if we already have meta descriptor on fs. Write one if not.
+      try {
+        getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
+      } catch (TableInfoMissingException e) {
+        TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf);
+        if (metaObserver != null) {
+          metaObserver.apply(builder);
+        }
+        TableDescriptor td = builder.build();
         LOG.info("Creating new hbase:meta table default descriptor/schema {}", td);
         updateTableDescriptor(td);
       }
     }
-    this.metaTableDescriptor = td;
   }
 
-  /**
-   *
-   * Make this private as soon as we've undone test dependency.
-   */
-  @VisibleForTesting
-  public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf)
+  private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf)
       throws IOException {
     // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
     // the META table data goes to File mode BC only. Test how that affect the system. If too much,
@@ -210,12 +202,6 @@ public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Conf
         .setPriority(Coprocessor.PRIORITY_SYSTEM).build());
   }
 
-  @VisibleForTesting
-  public static TableDescriptor createMetaTableDescriptor(final Configuration conf)
-      throws IOException {
-    return createMetaTableDescriptorBuilder(conf).build();
-  }
-
   @Override
   public void setCacheOn() throws IOException {
     this.cache.clear();
@@ -276,16 +262,12 @@ public TableDescriptor get(final TableName tablename)
    * Returns a map from table name to table descriptor for all tables.
    */
   @Override
-  public Map<String, TableDescriptor> getAll()
-      throws IOException {
+  public Map<String, TableDescriptor> getAll() throws IOException {
     Map<String, TableDescriptor> tds = new TreeMap<>();
-
     if (fsvisited && usecache) {
       for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
         tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue());
       }
-      // add hbase:meta to the response
-      tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor);
     } else {
       LOG.trace("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
@@ -568,15 +550,17 @@ private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus sta
    * @throws IOException Thrown if failed update.
   * @throws NotImplementedException if in read only mode
    */
-  @VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
-  throws IOException {
+  @VisibleForTesting
+  Path updateTableDescriptor(TableDescriptor td) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
     }
     TableName tableName = td.getTableName();
     Path tableDir = getTableDir(tableName);
     Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
-    if (p == null) throw new IOException("Failed update");
+    if (p == null) {
+      throw new IOException("Failed update");
+    }
     LOG.info("Updated tableinfo=" + p);
     if (usecache) {
       this.cache.put(td.getTableName(), td);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java
index bbdb327ed047..6977452724d3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java
@@ -22,7 +22,7 @@
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-
+import java.util.Collections;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -30,7 +30,7 @@
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
@@ -44,7 +44,7 @@
 /**
  * Test being able to edit hbase:meta.
  */
-@Category({MiscTests.class, LargeTests.class})
+@Category({ MiscTests.class, MediumTests.class })
 public class TestHBaseMetaEdit {
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
@@ -63,6 +63,23 @@ public void after() throws Exception {
     UTIL.shutdownMiniCluster();
   }
 
+  // make sure that with every possible way, we get the same meta table descriptor.
+  private TableDescriptor getMetaDescriptor() throws TableNotFoundException, IOException {
+    Admin admin = UTIL.getAdmin();
+    TableDescriptor get = admin.getDescriptor(TableName.META_TABLE_NAME);
+    TableDescriptor list =
+      admin.listTableDescriptors(true).stream().filter(td -> td.isMetaTable()).findAny().get();
+    TableDescriptor listByName =
+      admin.listTableDescriptors(Collections.singletonList(TableName.META_TABLE_NAME)).get(0);
+    TableDescriptor listByNs =
+      admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME).stream()
+        .filter(td -> td.isMetaTable()).findAny().get();
+    assertEquals(get, list);
+    assertEquals(get, listByName);
+    assertEquals(get, listByNs);
+    return get;
+  }
+
   /**
    * Set versions, set HBASE-16213 indexed block encoding, and add a column family.
    * Delete the column family. Then try to delete a core hbase:meta family (should fail).
@@ -73,7 +90,7 @@ public void after() throws Exception {
   public void testEditMeta() throws IOException {
     Admin admin = UTIL.getAdmin();
     admin.tableExists(TableName.META_TABLE_NAME);
-    TableDescriptor originalDescriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
+    TableDescriptor originalDescriptor = getMetaDescriptor();
     ColumnFamilyDescriptor cfd = originalDescriptor.getColumnFamily(HConstants.CATALOG_FAMILY);
     int oldVersions = cfd.getMaxVersions();
     // Add '1' to current versions count. Set encoding too.
@@ -85,11 +102,11 @@ public void testEditMeta() throws IOException {
     ColumnFamilyDescriptor newCfd =
       ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build();
     admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd);
-    TableDescriptor descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
+    TableDescriptor descriptor = getMetaDescriptor();
     // Assert new max versions is == old versions plus 1.
     assertEquals(oldVersions + 1,
       descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
-    descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
+    descriptor = getMetaDescriptor();
     // Assert new max versions is == old versions plus 1.
     assertEquals(oldVersions + 1,
       descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
@@ -107,7 +124,7 @@ public void testEditMeta() throws IOException {
     assertTrue(r.getStore(extraColumnFamilyName) != null);
     // Assert we can't drop critical hbase:meta column family but we can drop any other.
     admin.deleteColumnFamily(TableName.META_TABLE_NAME, newCfd.getName());
-    descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
+    descriptor = getMetaDescriptor();
     assertTrue(descriptor.getColumnFamily(newCfd.getName()) == null);
     try {
       admin.deleteColumnFamily(TableName.META_TABLE_NAME, HConstants.CATALOG_FAMILY);