Skip to content

Commit

Permalink
HBASE-23782 We still reference the hard coded meta descriptor in some places when listing table descriptors (#1115)
Browse files Browse the repository at this point in the history

Signed-off-by: Viraj Jasani <[email protected]>
  • Loading branch information
Apache9 authored Feb 3, 2020
1 parent 12f1c7c commit ab00399
Show file tree
Hide file tree
Showing 2 changed files with 55 additions and 54 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.util;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Comparator;
Expand All @@ -27,8 +28,6 @@
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import edu.umd.cs.findbugs.annotations.Nullable;
import org.apache.commons.lang3.NotImplementedException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
Expand All @@ -37,23 +36,24 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableInfoMissingException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HConstants;

import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableInfoMissingException;
import org.apache.hadoop.hbase.TableName;

/**
* Implementation of {@link TableDescriptors} that reads descriptors from the
Expand Down Expand Up @@ -99,11 +99,6 @@ public class FSTableDescriptors implements TableDescriptors {
// TODO.
private final Map<TableName, TableDescriptor> cache = new ConcurrentHashMap<>();

/**
* Table descriptor for <code>hbase:meta</code> catalog table
*/
private final TableDescriptor metaTableDescriptor;

/**
* Construct a FSTableDescriptors instance using the hbase root dir of the given
* conf and the filesystem where that root dir lives.
Expand Down Expand Up @@ -135,33 +130,30 @@ public FSTableDescriptors(final Configuration conf, final FileSystem fs,
* see HMaster#finishActiveMasterInitialization
* TODO: This is a workaround. Should remove this ugly code...
*/
public FSTableDescriptors(final Configuration conf, final FileSystem fs,
final Path rootdir, final boolean fsreadonly, final boolean usecache,
Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir,
final boolean fsreadonly, final boolean usecache,
Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
this.fs = fs;
this.rootdir = rootdir;
this.fsreadonly = fsreadonly;
this.usecache = usecache;
TableDescriptor td = null;
try {
td = getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
} catch (TableInfoMissingException e) {
td = metaObserver == null? createMetaTableDescriptor(conf):
metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
if (!fsreadonly) {
if (!fsreadonly) {
// see if we already have meta descriptor on fs. Write one if not.
try {
getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
} catch (TableInfoMissingException e) {
TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf);
if (metaObserver != null) {
metaObserver.apply(builder);
}
TableDescriptor td = builder.build();
LOG.info("Creating new hbase:meta table default descriptor/schema {}", td);
updateTableDescriptor(td);
}
}
this.metaTableDescriptor = td;
}

/**
*
* Make this private as soon as we've undone test dependency.
*/
@VisibleForTesting
public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf)
private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf)
throws IOException {
// TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
// the META table data goes to File mode BC only. Test how that affect the system. If too much,
Expand Down Expand Up @@ -210,12 +202,6 @@ public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Conf
.setPriority(Coprocessor.PRIORITY_SYSTEM).build());
}

@VisibleForTesting
public static TableDescriptor createMetaTableDescriptor(final Configuration conf)
throws IOException {
return createMetaTableDescriptorBuilder(conf).build();
}

@Override
public void setCacheOn() throws IOException {
this.cache.clear();
Expand Down Expand Up @@ -276,16 +262,12 @@ public TableDescriptor get(final TableName tablename)
* Returns a map from table name to table descriptor for all tables.
*/
@Override
public Map<String, TableDescriptor> getAll()
throws IOException {
public Map<String, TableDescriptor> getAll() throws IOException {
Map<String, TableDescriptor> tds = new TreeMap<>();

if (fsvisited && usecache) {
for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue());
}
// add hbase:meta to the response
tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor);
} else {
LOG.trace("Fetching table descriptors from the filesystem.");
boolean allvisited = true;
Expand Down Expand Up @@ -568,15 +550,17 @@ private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus sta
* @throws IOException Thrown if failed update.
* @throws NotImplementedException if in read only mode
*/
@VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
throws IOException {
@VisibleForTesting
Path updateTableDescriptor(TableDescriptor td) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
}
TableName tableName = td.getTableName();
Path tableDir = getTableDir(tableName);
Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
if (p == null) throw new IOException("Failed update");
if (p == null) {
throw new IOException("Failed update");
}
LOG.info("Updated tableinfo=" + p);
if (usecache) {
this.cache.put(td.getTableName(), td);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,15 +22,15 @@
import static org.junit.Assert.fail;

import java.io.IOException;

import java.util.Collections;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
Expand All @@ -44,7 +44,7 @@
/**
* Test being able to edit hbase:meta.
*/
@Category({MiscTests.class, LargeTests.class})
@Category({ MiscTests.class, MediumTests.class })
public class TestHBaseMetaEdit {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
Expand All @@ -63,6 +63,23 @@ public void after() throws Exception {
UTIL.shutdownMiniCluster();
}

// Make sure that every possible way of fetching it returns the same meta table descriptor.
private TableDescriptor getMetaDescriptor() throws TableNotFoundException, IOException {
Admin admin = UTIL.getAdmin();
TableDescriptor get = admin.getDescriptor(TableName.META_TABLE_NAME);
TableDescriptor list =
admin.listTableDescriptors(true).stream().filter(td -> td.isMetaTable()).findAny().get();
TableDescriptor listByName =
admin.listTableDescriptors(Collections.singletonList(TableName.META_TABLE_NAME)).get(0);
TableDescriptor listByNs =
admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME).stream()
.filter(td -> td.isMetaTable()).findAny().get();
assertEquals(get, list);
assertEquals(get, listByName);
assertEquals(get, listByNs);
return get;
}

/**
* Set versions, set HBASE-16213 indexed block encoding, and add a column family.
* Delete the column family. Then try to delete a core hbase:meta family (should fail).
Expand All @@ -73,7 +90,7 @@ public void after() throws Exception {
public void testEditMeta() throws IOException {
Admin admin = UTIL.getAdmin();
admin.tableExists(TableName.META_TABLE_NAME);
TableDescriptor originalDescriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
TableDescriptor originalDescriptor = getMetaDescriptor();
ColumnFamilyDescriptor cfd = originalDescriptor.getColumnFamily(HConstants.CATALOG_FAMILY);
int oldVersions = cfd.getMaxVersions();
// Add '1' to current versions count. Set encoding too.
Expand All @@ -85,11 +102,11 @@ public void testEditMeta() throws IOException {
ColumnFamilyDescriptor newCfd =
ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build();
admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd);
TableDescriptor descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
TableDescriptor descriptor = getMetaDescriptor();
// Assert new max versions is == old versions plus 1.
assertEquals(oldVersions + 1,
descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
descriptor = getMetaDescriptor();
// Assert new max versions is == old versions plus 1.
assertEquals(oldVersions + 1,
descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
Expand All @@ -107,7 +124,7 @@ public void testEditMeta() throws IOException {
assertTrue(r.getStore(extraColumnFamilyName) != null);
// Assert we can't drop critical hbase:meta column family but we can drop any other.
admin.deleteColumnFamily(TableName.META_TABLE_NAME, newCfd.getName());
descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
descriptor = getMetaDescriptor();
assertTrue(descriptor.getColumnFamily(newCfd.getName()) == null);
try {
admin.deleteColumnFamily(TableName.META_TABLE_NAME, HConstants.CATALOG_FAMILY);
Expand Down

0 comments on commit ab00399

Please sign in to comment.