From ea2fe2e41ddf91bce911d72b3576e5464f0d88a6 Mon Sep 17 00:00:00 2001
From: Amareshwari Sri Ramadasu
Date: Mon, 11 Jul 2011 07:35:19 +0000
Subject: [PATCH] HIVE-1537. Allow users to specify LOCATION in CREATE DATABASE
 statement. Contributed by Thiruvel Thirumoolan

git-svn-id: https://svn.apache.org/repos/asf/hive/trunk@1145053 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hive/metastore/HiveAlterHandler.java      |   8 +-
 .../hadoop/hive/metastore/HiveMetaStore.java  |  42 +++--
 .../hadoop/hive/metastore/Warehouse.java      |  19 ++-
 .../hive/metastore/TestHiveMetaStore.java     | 149 +++++++++++++++++-
 .../apache/hadoop/hive/ql/exec/StatsTask.java |   2 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java    |  11 +-
 .../org/apache/hadoop/hive/ql/parse/Hive.g    |  13 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java |  14 +-
 .../hive/ql/parse/SemanticAnalyzer.java       |   2 +-
 .../hadoop/hive/ql/metadata/TestHive.java     |   6 +-
 .../clientpositive/database_location.q        |  16 ++
 .../clientpositive/database_location.q.out    | 116 ++++++++++++++
 12 files changed, 355 insertions(+), 43 deletions(-)
 create mode 100644 ql/src/test/queries/clientpositive/database_location.q
 create mode 100644 ql/src/test/results/clientpositive/database_location.q.out

diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 52caa9f62191..60ac80cb2e76 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 
 /**
  * Hive specific implementation of alter
@@ -115,7 +116,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname,
           // that means user is asking metastore to move data to new location
           // corresponding to the new name
           // get new location
-          newTblLoc = wh.getDefaultTablePath(newt.getDbName(), newt.getTableName()).toString();
+          newTblLoc = wh.getTablePath(msdb.getDatabase(newt.getDbName()), newt.getTableName()).toString();
           newt.getSd().setLocation(newTblLoc);
           oldTblLoc = oldt.getSd().getLocation();
           moveData = true;
@@ -176,6 +177,11 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname,
       throw new InvalidOperationException(
           "Unable to change partition or table."
               + " Check metastore logs for detailed stack." + e.getMessage());
+    } catch (NoSuchObjectException e) {
+      LOG.debug(e);
+      throw new InvalidOperationException(
+          "Unable to change partition or table. Database " + dbname + " does not exist"
+              + " Check metastore logs for detailed stack." + e.getMessage());
     } finally {
       if (!success) {
         msdb.rollbackTransaction();
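
Note: with the hunk above, renaming a managed table derives the table's new
path from its owning database (via Warehouse.getTablePath and the database's
locationUri) rather than from the fixed <warehouse-root>/<dbname>.db
convention. A minimal HiveQL illustration; the '/data/hive/db1' path is
hypothetical:

    CREATE DATABASE db1 LOCATION '/data/hive/db1';
    USE db1;
    CREATE TABLE t1 (x INT);
    -- The rename now moves the data to /data/hive/db1/t2, under the
    -- database's declared location, instead of deriving the target
    -- from the warehouse root.
    ALTER TABLE t1 RENAME TO t2;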
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index f70f39d1caca..e540c6331edd 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -463,7 +463,7 @@ private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObje
       } catch (NoSuchObjectException e) {
         ms.createDatabase(
             new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT,
-                wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null));
+                getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null));
       }
       HMSHandler.createDefaultDB = true;
     }
@@ -570,31 +570,53 @@ public void shutdown() {
       logInfo("Metastore shutdown complete.");
     }
 
+    private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
+
+    private Path getDefaultDatabasePath(String dbName) throws MetaException {
+      if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+        return wh.getWhRoot();
+      }
+      return new Path(wh.getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
+    }
+
     private void create_database_core(RawStore ms, final Database db)
         throws AlreadyExistsException, InvalidObjectException, MetaException, IOException {
       if (!validateName(db.getName())) {
         throw new InvalidObjectException(db.getName() + " is not a valid database name");
       }
+      if (null == db.getLocationUri()) {
+        db.setLocationUri(getDefaultDatabasePath(db.getName()).toString());
+      } else {
+        db.setLocationUri(wh.getDnsPath(new Path(db.getLocationUri())).toString());
+      }
+      Path dbPath = new Path(db.getLocationUri());
       boolean success = false;
+      boolean madeDir = false;
       try {
-        ms.openTransaction();
-        if (null == db.getLocationUri()) {
-          db.setLocationUri(wh.getDefaultDatabasePath(db.getName()).toString());
+        if (!wh.isDir(dbPath)) {
+          if (!wh.mkdirs(dbPath)) {
+            throw new MetaException("Unable to create database path " + dbPath
+                + ", failed to create database " + db.getName());
+          }
+          madeDir = true;
         }
+
+        ms.openTransaction();
         ms.createDatabase(db);
         success = ms.commitTransaction();
       } finally {
         if (!success) {
           ms.rollbackTransaction();
-        } else {
-          wh.mkdirs(new Path(db.getLocationUri()));
+          if (madeDir) {
+            wh.deleteDir(dbPath, true);
+          }
         }
         for (MetaStoreEventListener listener : listeners) {
           listener.onCreateDatabase(new CreateDatabaseEvent(db, success, this));
+        }
       }
     }
-    }
 
     public void create_database(final Database db)
         throws AlreadyExistsException, InvalidObjectException, MetaException {
@@ -923,7 +945,7 @@ public Map<String, Type> get_type_all(String name) throws MetaException {
     }
 
     private void create_table_core(final RawStore ms, final Table tbl)
-        throws AlreadyExistsException, MetaException, InvalidObjectException {
+        throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException {
 
       if (!MetaStoreUtils.validateName(tbl.getTableName())
           || !MetaStoreUtils.validateColNames(tbl.getSd().getCols())
@@ -947,8 +969,8 @@ private void create_table_core(final RawStore ms, final Table tbl)
         if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) {
           if (tbl.getSd().getLocation() == null
               || tbl.getSd().getLocation().isEmpty()) {
-            tblPath = wh.getDefaultTablePath(
-                tbl.getDbName(), tbl.getTableName());
+            tblPath = wh.getTablePath(
+                ms.getDatabase(tbl.getDbName()), tbl.getTableName());
           } else {
             if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) {
               LOG.warn("Location: " + tbl.getSd().getLocation()
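
Note: create_database_core now resolves the location eagerly. If no LOCATION
was supplied it falls back to <warehouse-root>/<dbname>.db (the new
getDefaultDatabasePath, with the built-in 'default' database pinned to the
warehouse root); otherwise it qualifies the user-supplied path via
Warehouse.getDnsPath. The directory is created before the metastore
transaction opens, and any directory this call created is deleted again if
the transaction fails, so a failed CREATE DATABASE leaves no stray directory
behind. In HiveQL terms (database names and the explicit path are
illustrative):

    -- stored at <hive.metastore.warehouse.dir>/salesdb.db
    CREATE DATABASE salesdb;

    -- stored at the given path, resolved to a fully qualified filesystem URI
    CREATE DATABASE marketingdb LOCATION '/data/hive/marketingdb';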
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
index b18e3d48ba93..704357bcd740 100755
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -59,8 +60,6 @@ public class Warehouse {
   private final Configuration conf;
   private final String whRootString;
 
-  private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
-
   public static final Log LOG = LogFactory.getLog("hive.metastore.warehouse");
 
   private MetaStoreFS fsHandler = null;
@@ -143,7 +142,7 @@ public Path getDnsPath(Path path) throws MetaException {
    * dir (but that should be ok given that this is only called during DDL
    * statements for non-external tables).
    */
-  private Path getWhRoot() throws MetaException {
+  public Path getWhRoot() throws MetaException {
     if (whRoot != null) {
       return whRoot;
     }
@@ -156,16 +155,16 @@ public Path getTablePath(String whRootString, String tableName) throws MetaExcep
     return new Path(whRoot, tableName.toLowerCase());
   }
 
-  public Path getDefaultDatabasePath(String dbName) throws MetaException {
-    if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+  public Path getDatabasePath(Database db) throws MetaException {
+    if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
       return getWhRoot();
     }
-    return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
+    return new Path(db.getLocationUri());
   }
 
-  public Path getDefaultTablePath(String dbName, String tableName)
+  public Path getTablePath(Database db, String tableName)
       throws MetaException {
-    return new Path(getDefaultDatabasePath(dbName), tableName.toLowerCase());
+    return getDnsPath(new Path(getDatabasePath(db), tableName.toLowerCase()));
   }
 
   public boolean mkdirs(Path f) throws MetaException {
@@ -393,9 +392,9 @@ public static void makeSpecFromName(Map<String, String> partSpec, Path currPath)
     }
   }
 
-  public Path getPartitionPath(String dbName, String tableName,
+  public Path getPartitionPath(Database db, String tableName,
       LinkedHashMap<String, String> pm) throws MetaException {
-    return new Path(getDefaultTablePath(dbName, tableName), makePartPath(pm));
+    return new Path(getTablePath(db, tableName), makePartPath(pm));
   }
 
   public Path getPartitionPath(Path tblPath, LinkedHashMap<String, String> pm)
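
Note: getDatabasePath and getTablePath now take the Database object, so every
table path chains off db.getLocationUri(); only the built-in 'default'
database remains pinned at the warehouse root. Mirroring the new
clientpositive test, a table created inside a relocated database lands under
that database's directory (the '/data/hive/db2' path is illustrative):

    CREATE DATABASE db2 LOCATION '/data/hive/db2';
    USE db2;
    CREATE TABLE table_db2 (name STRING, value INT);
    -- table directory: /data/hive/db2/table_db2, i.e.
    -- getTablePath(db2, "table_db2") = <db2 locationUri>/table_db2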
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 8d66cc8e3890..308a8a458b1e 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -30,6 +30,7 @@
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
@@ -473,7 +474,7 @@ public void testDatabase() throws Throwable {
       assertEquals("name of returned db is different from that of inserted db",
           TEST_DB1_NAME, db.getName());
       assertEquals("location of the returned db is different from that of inserted db",
-          warehouse.getDefaultDatabasePath(TEST_DB1_NAME).toString(), db.getLocationUri());
+          warehouse.getDatabasePath(db).toString(), db.getLocationUri());
 
       Database db2 = new Database();
       db2.setName(TEST_DB2_NAME);
@@ -484,7 +485,7 @@ public void testDatabase() throws Throwable {
       assertEquals("name of returned db is different from that of inserted db",
           TEST_DB2_NAME, db2.getName());
       assertEquals("location of the returned db is different from that of inserted db",
-          warehouse.getDefaultDatabasePath(TEST_DB2_NAME).toString(), db2.getLocationUri());
+          warehouse.getDatabasePath(db2).toString(), db2.getLocationUri());
 
       List<String> dbs = client.getDatabases(".*");
 
@@ -502,6 +503,96 @@ public void testDatabase() throws Throwable {
     }
   }
 
+  public void testDatabaseLocation() throws Throwable {
+    try {
+      // clear up any existing databases
+      silentDropDatabase(TEST_DB1_NAME);
+
+      Database db = new Database();
+      db.setName(TEST_DB1_NAME);
+      String dbLocation =
+          HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/_testDB_create_";
+      db.setLocationUri(dbLocation);
+      client.createDatabase(db);
+
+      db = client.getDatabase(TEST_DB1_NAME);
+
+      assertEquals("name of returned db is different from that of inserted db",
+          TEST_DB1_NAME, db.getName());
+      assertEquals("location of the returned db is different from that of inserted db",
+          warehouse.getDnsPath(new Path(dbLocation)).toString(), db.getLocationUri());
+
+      client.dropDatabase(TEST_DB1_NAME);
+      silentDropDatabase(TEST_DB1_NAME);
+
+      db = new Database();
+      db.setName(TEST_DB1_NAME);
+      dbLocation =
+          HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test/_testDB_create_";
+      FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), hiveConf);
+      fs.mkdirs(
+          new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test"),
+          new FsPermission((short) 0));
+      db.setLocationUri(dbLocation);
+
+      boolean createFailed = false;
+      try {
+        client.createDatabase(db);
+      } catch (MetaException cantCreateDB) {
+        createFailed = true;
+      }
+      assertTrue("Database creation succeeded even with permission problem", createFailed);
+
+      boolean objectNotExist = false;
+      try {
+        client.getDatabase(TEST_DB1_NAME);
+      } catch (NoSuchObjectException e) {
+        objectNotExist = true;
+      }
+      assertTrue("Database " + TEST_DB1_NAME + " exists ", objectNotExist);
+
+      // Cleanup
+      fs.setPermission(
+          new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test"),
+          new FsPermission((short) 755));
+      fs.delete(new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test"), true);
+
+      db = new Database();
+      db.setName(TEST_DB1_NAME);
+      dbLocation =
+          HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/_testDB_file_";
+      fs = FileSystem.get(new Path(dbLocation).toUri(), hiveConf);
+      fs.createNewFile(new Path(dbLocation));
+      fs.deleteOnExit(new Path(dbLocation));
+      db.setLocationUri(dbLocation);
+
+      createFailed = false;
+      try {
+        client.createDatabase(db);
+      } catch (MetaException cantCreateDB) {
+        System.err.println(cantCreateDB.getMessage());
+        createFailed = true;
+      }
+      assertTrue("Database creation succeeded even location exists and is a file", createFailed);
+
+      objectNotExist = false;
+      try {
+        client.getDatabase(TEST_DB1_NAME);
+      } catch (NoSuchObjectException e) {
+        objectNotExist = true;
+      }
+      assertTrue("Database " + TEST_DB1_NAME + " exists when location is specified and is a file",
+          objectNotExist);
+
+    } catch (Throwable e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testDatabaseLocation() failed.");
+      throw e;
+    }
+  }
+
   public void testSimpleTypeApi() throws Exception {
     try {
       client.dropType(Constants.INT_TYPE_NAME);
@@ -996,6 +1087,60 @@ public void testComplexTable() throws Exception {
     }
   }
 
+  public void testTableDatabase() throws Exception {
+    String dbName = "testDb";
+    String tblName_1 = "testTbl_1";
+    String tblName_2 = "testTbl_2";
+
+    try {
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      String dbLocation =
+          HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "_testDB_table_create_";
+      db.setLocationUri(dbLocation);
+      client.createDatabase(db);
+      db = client.getDatabase(dbName);
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName_1);
+
+      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+      cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+      cols.add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
+
+      StorageDescriptor sd = new StorageDescriptor();
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters().put(
+          org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "9");
+      sd.getSerdeInfo().setSerializationLib(
+          org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
+
+      tbl.setSd(sd);
+      tbl.getSd().setCols(cols);
+      client.createTable(tbl);
+      tbl = client.getTable(dbName, tblName_1);
+
+      Path path = new Path(tbl.getSd().getLocation());
+      System.err.println("Table's location " + path + ", Database's location " + db.getLocationUri());
+      assertEquals("Table location is not a subset of the database location",
+          path.getParent().toString(), db.getLocationUri());
+
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testTableDatabase() failed.");
+      throw e;
+    } finally {
+      silentDropDatabase(dbName);
+    }
+  }
+
   public void testGetConfigValue() {
 
     String val = "value";
+ " exists when location is specified and is a file", + objectNotExist); + + } catch (Throwable e) { + System.err.println(StringUtils.stringifyException(e)); + System.err.println("testDatabaseLocation() failed."); + throw e; + } + } + + public void testSimpleTypeApi() throws Exception { try { client.dropType(Constants.INT_TYPE_NAME); @@ -996,6 +1087,60 @@ public void testComplexTable() throws Exception { } } + public void testTableDatabase() throws Exception { + String dbName = "testDb"; + String tblName_1 = "testTbl_1"; + String tblName_2 = "testTbl_2"; + + try { + silentDropDatabase(dbName); + + Database db = new Database(); + db.setName(dbName); + String dbLocation = + HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "_testDB_table_create_"; + db.setLocationUri(dbLocation); + client.createDatabase(db); + db = client.getDatabase(dbName); + + Table tbl = new Table(); + tbl.setDbName(dbName); + tbl.setTableName(tblName_1); + + ArrayList cols = new ArrayList(2); + cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, "")); + cols.add(new FieldSchema("income", Constants.INT_TYPE_NAME, "")); + + StorageDescriptor sd = new StorageDescriptor(); + sd.setSerdeInfo(new SerDeInfo()); + sd.getSerdeInfo().setName(tbl.getTableName()); + sd.getSerdeInfo().setParameters(new HashMap()); + sd.setParameters(new HashMap()); + sd.getSerdeInfo().getParameters().put( + org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "9"); + sd.getSerdeInfo().setSerializationLib( + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); + + tbl.setSd(sd); + tbl.getSd().setCols(cols); + client.createTable(tbl); + tbl = client.getTable(dbName, tblName_1); + + Path path = new Path(tbl.getSd().getLocation()); + System.err.println("Table's location " + path + ", Database's location " + db.getLocationUri()); + assertEquals("Table location is not a subset of the database location", + path.getParent().toString(), db.getLocationUri()); + + } catch (Exception e) { + System.err.println(StringUtils.stringifyException(e)); + System.err.println("testTableDatabase() failed."); + throw e; + } finally { + silentDropDatabase(dbName); + } + } + + public void testGetConfigValue() { String val = "value"; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java index 04e14dcf300a..071b7ef5e225 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java @@ -294,7 +294,7 @@ private int aggregateStats() { if (!tableStatsExist && atomic) { return 0; } - Path tablePath = wh.getDefaultTablePath(table.getDbName(), table.getTableName()); + Path tablePath = wh.getTablePath(db.getDatabase(table.getDbName()), table.getTableName()); fileSys = tablePath.getFileSystem(conf); fileStatus = Utilities.getFileStatusRecurse(tablePath, 1, fileSys); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 4c3e562f350b..337a8eb9a816 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -628,6 +628,7 @@ private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); boolean ifNotExists = false; String dbComment = null; + String dbLocation = null; Map dbProps = null; for (int i = 1; i < 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
index b889c7b6384d..c2bd611d896f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
@@ -243,6 +243,7 @@ TOK_SHOWINDEXES;
 TOK_INDEXCOMMENT;
 TOK_DESCDATABASE;
 TOK_DATABASEPROPERTIES;
+TOK_DATABASELOCATION;
 TOK_DBPROPLIST;
 TOK_ALTERDATABASE_PROPERTIES;
 TOK_ALTERTABLE_ALTERPARTS_MERGEFILES;
@@ -375,9 +376,17 @@ createDatabaseStatement
     : KW_CREATE (KW_DATABASE|KW_SCHEMA)
         ifNotExists?
         name=Identifier
+        dbLocation?
        databaseComment?
         (KW_WITH KW_DBPROPERTIES dbprops=dbProperties)?
-    -> ^(TOK_CREATEDATABASE $name ifNotExists? databaseComment? $dbprops?)
+    -> ^(TOK_CREATEDATABASE $name ifNotExists? dbLocation? databaseComment? $dbprops?)
+    ;
+
+dbLocation
+@init { msgs.push("database location specification"); }
+@after { msgs.pop(); }
+    :
+      KW_LOCATION locn=StringLiteral -> ^(TOK_DATABASELOCATION $locn)
     ;
 
 dbProperties
@@ -591,7 +600,7 @@ alterDatabaseStatementSuffix
 @after { msgs.pop(); }
     : alterDatabaseSuffixProperties
     ;
-    
+
 alterDatabaseSuffixProperties
 @init { msgs.push("alter database properties statement"); }
 @after { msgs.pop(); }
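
Note: per the grammar above, the optional LOCATION clause follows the
database name and, in this revision, precedes the optional COMMENT and WITH
DBPROPERTIES clauses. Accepted forms, the first two mirroring the new
clientpositive test (db4 and its clauses are illustrative only):

    -- default location: <hive.metastore.warehouse.dir>/db1.db
    CREATE DATABASE db1;

    -- explicit location
    CREATE DATABASE db2 LOCATION '/user/hive/warehouse/db2';

    -- combined with the existing optional clauses
    CREATE DATABASE IF NOT EXISTS db4
      LOCATION '/data/hive/db4'
      COMMENT 'database with an explicit location'
      WITH DBPROPERTIES ('creator' = 'hive');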
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index d090991ab4a6..300157513f65 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
@@ -234,8 +235,8 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
             tblDesc), conf);
         Table table = new Table(dbname, tblDesc.getTableName());
         conf.set("import.destination.dir",
-            wh.getDnsPath(wh.getDefaultTablePath(
-                db.getCurrentDatabase(), tblDesc.getTableName())).toString());
+            wh.getTablePath(db.getDatabase(db.getCurrentDatabase()),
+                tblDesc.getTableName()).toString());
         if ((tblDesc.getPartCols() != null) && (tblDesc.getPartCols().size() != 0)) {
           for (AddPartitionDesc addPartitionDesc : partitionDescs) {
             t.addDependentTask(
@@ -252,8 +253,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
           if (tblDesc.getLocation() != null) {
             tablePath = new Path(tblDesc.getLocation());
           } else {
-            tablePath = wh.getDnsPath(wh.getDefaultTablePath(
-                db.getCurrentDatabase(), tblDesc.getTableName()));
+            tablePath = wh.getTablePath(db.getDatabase(db.getCurrentDatabase()), tblDesc.getTableName());
           }
           checkTargetLocationEmpty(fs, tablePath);
           t.addDependentTask(loadTable(fromURI, table));
@@ -288,7 +288,7 @@ private Task loadTable(URI fromURI, Table table) {
 
   private Task addSinglePartition(URI fromURI, FileSystem fs, CreateTableDesc tblDesc,
       Table table, Warehouse wh,
-      AddPartitionDesc addPartitionDesc) throws MetaException, IOException, SemanticException {
+      AddPartitionDesc addPartitionDesc) throws MetaException, IOException, HiveException {
     if (tblDesc.isExternal() && tblDesc.getLocation() == null) {
       LOG.debug("Importing in-place: adding AddPart for partition "
           + partSpecToString(addPartitionDesc.getPartSpec()));
@@ -304,8 +304,8 @@ private Task addSinglePartition(URI fromURI, FileSystem fs, CreateTableDesc t
         tgtPath = new Path(table.getDataLocation().toString(),
             Warehouse.makePartPath(addPartitionDesc.getPartSpec()));
       } else {
-        tgtPath = new Path(wh.getDnsPath(wh.getDefaultTablePath(
-            db.getCurrentDatabase(), tblDesc.getTableName())),
+        tgtPath = new Path(wh.getTablePath(
+            db.getDatabase(db.getCurrentDatabase()), tblDesc.getTableName()),
             Warehouse.makePartPath(addPartitionDesc.getPartSpec()));
       }
     } else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 8fe598415fa1..dc9ab1bc2a02 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6810,7 +6810,7 @@ private void genMapRedTasks(QB qb) throws SemanticException {
       try {
         dumpTable = db.newTable(qb.getTableDesc().getTableName());
         Warehouse wh = new Warehouse(conf);
-        targetPath = wh.getDefaultTablePath(dumpTable.getDbName(), dumpTable
+        targetPath = wh.getTablePath(db.getDatabase(dumpTable.getDbName()), dumpTable
             .getTableName());
       } catch (HiveException e) {
         throw new SemanticException(e);
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index 14961d209a6b..ff60ad22e427 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -155,9 +155,8 @@ public void testTable() throws Throwable {
           .getOwner(), ft.getOwner());
       assertEquals("Table retention didn't match for table: " + tableName,
           tbl.getRetention(), ft.getRetention());
-      String dbPath = wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString();
       assertEquals("Data location is not set correctly",
-          wh.getDefaultTablePath(DEFAULT_DATABASE_NAME, tableName).toString(),
+          wh.getTablePath(hm.getDatabase(DEFAULT_DATABASE_NAME), tableName).toString(),
           ft.getDataLocation().toString());
       // now that URI is set correctly, set the original table's uri and then
       // compare the two tables
@@ -229,9 +228,8 @@ public void testThriftTable() throws Throwable {
           .getOwner(), ft.getOwner());
       assertEquals("Table retention didn't match for table: " + tableName,
           tbl.getRetention(), ft.getRetention());
-      String dbPath = wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString();
       assertEquals("Data location is not set correctly",
-          wh.getDefaultTablePath(DEFAULT_DATABASE_NAME, tableName).toString(),
+          wh.getTablePath(hm.getDatabase(DEFAULT_DATABASE_NAME), tableName).toString(),
          ft.getDataLocation().toString());
       // now that URI is set correctly, set the original table's uri and then
       // compare the two tables
diff --git a/ql/src/test/queries/clientpositive/database_location.q b/ql/src/test/queries/clientpositive/database_location.q
new file mode 100644
index 000000000000..ac8687fe0264
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/database_location.q
@@ -0,0 +1,16 @@
+create database db1;
+describe database extended db1;
+use db1;
+create table table_db1 (name string, value int);
+describe formatted table_db1;
+show tables;
+
+create database db2 location '${hiveconf:hive.metastore.warehouse.dir}/db2';
+describe database extended db2;
+use db2;
+create table table_db2 (name string, value int);
+describe formatted table_db2;
+show tables;
+
+drop database db2 cascade;
+drop database db1 cascade;
\ No newline at end of file
diff --git a/ql/src/test/results/clientpositive/database_location.q.out b/ql/src/test/results/clientpositive/database_location.q.out
new file mode 100644
index 000000000000..a09f5a2d79e1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/database_location.q.out
@@ -0,0 +1,116 @@
+PREHOOK: query: create database db1
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: create database db1
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: describe database extended db1
+PREHOOK: type: DESCDATABASE
+POSTHOOK: query: describe database extended db1
+POSTHOOK: type: DESCDATABASE
+db1		pfile:/home/thiruvel/projects/hive/secure.trunk/build/ql/test/data/warehouse/db1.db
+PREHOOK: query: use db1
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: use db1
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: create table table_db1 (name string, value int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table table_db1 (name string, value int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: db1@table_db1
+PREHOOK: query: describe formatted table_db1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted table_db1
+POSTHOOK: type: DESCTABLE
+# col_name	data_type	comment
+
+name	string	None
+value	int	None
+
+# Detailed Table Information
+Database:	db1
+Owner:	thiruvel
+CreateTime:	Tue Jun 21 06:57:44 PDT 2011
+LastAccessTime:	UNKNOWN
+Protect Mode:	None
+Retention:	0
+Location:	pfile:/home/thiruvel/projects/hive/secure.trunk/build/ql/test/data/warehouse/db1.db/table_db1
+Table Type:	MANAGED_TABLE
+Table Parameters:
+	transient_lastDdlTime	1308664664
+
+# Storage Information
+SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:	org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:	No
+Num Buckets:	-1
+Bucket Columns:	[]
+Sort Columns:	[]
+Storage Desc Params:
+	serialization.format	1
+PREHOOK: query: show tables
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: show tables
+POSTHOOK: type: SHOWTABLES
+table_db1
+PREHOOK: query: create database db2 location 'pfile:///home/thiruvel/projects/hive/secure.trunk/build/ql/test/data/warehouse/db2'
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: create database db2 location 'pfile:///home/thiruvel/projects/hive/secure.trunk/build/ql/test/data/warehouse/db2'
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: describe database extended db2
+PREHOOK: type: DESCDATABASE
+POSTHOOK: query: describe database extended db2
+POSTHOOK: type: DESCDATABASE
+db2		pfile:/home/thiruvel/projects/hive/secure.trunk/build/ql/test/data/warehouse/db2
+PREHOOK: query: use db2
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: use db2
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: create table table_db2 (name string, value int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table table_db2 (name string, value int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: db2@table_db2
+PREHOOK: query: describe formatted table_db2
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted table_db2
+POSTHOOK: type: DESCTABLE
+# col_name	data_type	comment
+
+name	string	None
+value	int	None
+
+# Detailed Table Information
+Database:	db2
+Owner:	thiruvel
+CreateTime:	Tue Jun 21 06:57:45 PDT 2011
+LastAccessTime:	UNKNOWN
+Protect Mode:	None
+Retention:	0
+Location:	pfile:/home/thiruvel/projects/hive/secure.trunk/build/ql/test/data/warehouse/db2/table_db2
+Table Type:	MANAGED_TABLE
+Table Parameters:
+	transient_lastDdlTime	1308664665
+
+# Storage Information
+SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:	org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:	No
+Num Buckets:	-1
+Bucket Columns:	[]
+Sort Columns:	[]
+Storage Desc Params:
+	serialization.format	1
+PREHOOK: query: show tables
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: show tables
+POSTHOOK: type: SHOWTABLES
+table_db2
+PREHOOK: query: drop database db2 cascade
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: drop database db2 cascade
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: drop database db1 cascade
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: drop database db1 cascade
+POSTHOOK: type: DROPDATABASE