diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java index 49b5fad11be7..33d13013049d 100644 --- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java +++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java @@ -1831,7 +1831,7 @@ String getPromptForCli() { String prompt; // read prompt configuration and substitute variables. HiveConf conf = getCommands().getHiveConf(true); - prompt = conf.getVar(HiveConf.ConfVars.CLIPROMPT); + prompt = conf.getVar(HiveConf.ConfVars.CLI_PROMPT); prompt = getCommands().substituteVariables(conf, prompt); return prompt + getFormattedDb() + "> "; } diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java b/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java index 17af4e2cd714..04ebab7df2e4 100644 --- a/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java +++ b/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java @@ -291,7 +291,7 @@ public void updateBeeLineOptsFromConf() { if (conf == null) { conf = beeLine.getCommands().getHiveConf(false); } - setForce(HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS)); + setForce(HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLI_IGNORE_ERRORS)); } } @@ -529,7 +529,7 @@ public boolean getShowDbInPrompt() { return showDbInPrompt; } else { HiveConf conf = beeLine.getCommands().getHiveConf(true); - return HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIPRINTCURRENTDB); + return HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLI_PRINT_CURRENT_DB); } } diff --git a/beeline/src/test/org/apache/hive/beeline/cli/TestHiveCli.java b/beeline/src/test/org/apache/hive/beeline/cli/TestHiveCli.java index 5ea4d11b7abd..048ca59becb0 100644 --- a/beeline/src/test/org/apache/hive/beeline/cli/TestHiveCli.java +++ b/beeline/src/test/org/apache/hive/beeline/cli/TestHiveCli.java @@ -280,7 +280,7 @@ private void verifyCMD(String CMD, String keywords, OutputStream os, String[] op public static void init(){ // something changed scratch dir permissions, so test can't execute HiveConf hiveConf = new HiveConf(); - String scratchDir = hiveConf.get(HiveConf.ConfVars.SCRATCHDIR.varname); + String scratchDir = hiveConf.get(HiveConf.ConfVars.SCRATCH_DIR.varname); File file = new File(scratchDir); if (file.exists()) { file.setWritable(true, false); diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java index d0d37b506ab2..6eb679ddebc1 100644 --- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java +++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java @@ -425,7 +425,7 @@ public void handle(Signal signal) { ret = processCmd(command.toString()); lastRet = ret; } catch (CommandProcessorException e) { - boolean ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS); + boolean ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLI_IGNORE_ERRORS); if (!ignoreErrors) { throw e; } @@ -773,7 +773,7 @@ public int run(String[] args) throws Exception { } // read prompt configuration and substitute variables. 
- prompt = conf.getVar(HiveConf.ConfVars.CLIPROMPT); + prompt = conf.getVar(HiveConf.ConfVars.CLI_PROMPT); prompt = new VariableSubstitution(new HiveVariableSource() { @Override public Map getHiveVariable() { @@ -936,7 +936,7 @@ protected void setupConsoleReader() throws IOException { * @return String to show user for current db value */ private static String getFormattedDb(HiveConf conf, CliSessionState ss) { - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIPRINTCURRENTDB)) { + if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLI_PRINT_CURRENT_DB)) { return ""; } //BUG: This will not work in remote mode - HIVE-5153 diff --git a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java index afe6607298a9..806ed9be66cf 100644 --- a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java @@ -117,11 +117,11 @@ public static String initHiveLog4jCommon(HiveConf conf, ConfVars confVarName) // property specified file found in local file system // use the specified file if (confVarName == HiveConf.ConfVars.HIVE_EXEC_LOG4J_FILE) { - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); if(queryId == null || (queryId = queryId.trim()).isEmpty()) { queryId = "unknown-" + System.currentTimeMillis(); } - System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId); + System.setProperty(HiveConf.ConfVars.HIVE_QUERY_ID.toString(), queryId); } final boolean async = checkAndSetAsyncLogging(conf); // required for MDC based routing appender so that child threads can inherit the MDC context @@ -157,8 +157,8 @@ private static String initHiveLog4jDefault( if (hive_l4j == null) { hive_l4j = LogUtils.class.getClassLoader().getResource(HIVE_L4J); } - System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), - HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID)); + System.setProperty(HiveConf.ConfVars.HIVE_QUERY_ID.toString(), + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID)); break; case HIVE_LOG4J_FILE: hive_l4j = LogUtils.class.getClassLoader().getResource(HIVE_L4J); @@ -216,8 +216,8 @@ public static String maskIfPassword(String key, String value) { */ public static void registerLoggingContext(Configuration conf) { if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) { - MDC.put(SESSIONID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVESESSIONID)); - MDC.put(QUERYID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID)); + MDC.put(SESSIONID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SESSION_ID)); + MDC.put(QUERYID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID)); MDC.put(OPERATIONLOG_LEVEL_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL)); MDC.put(OPERATIONLOG_LOCATION_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION)); l4j.info("Thread context registration is done."); diff --git a/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java b/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java index d7f4b146ed52..f4f7a9cb051f 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java @@ -38,7 +38,7 @@ public class ServerUtils { public static void cleanUpScratchDir(HiveConf hiveConf) { if 
(hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_START_CLEANUP_SCRATCHDIR)) { - String hiveScratchDir = hiveConf.get(HiveConf.ConfVars.SCRATCHDIR.varname); + String hiveScratchDir = hiveConf.get(HiveConf.ConfVars.SCRATCH_DIR.varname); try { Path jobScratchDir = new Path(hiveScratchDir); LOG.info("Cleaning scratchDir : " + hiveScratchDir); diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 1fa63ae3821a..cbe91a509ff2 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -156,7 +156,7 @@ public static ResultFileFormat from(String value) { } public ResultFileFormat getResultFileFormat() { - return ResultFileFormat.from(this.getVar(ConfVars.HIVEQUERYRESULTFILEFORMAT)); + return ResultFileFormat.from(this.getVar(ConfVars.HIVE_QUERY_RESULT_FILEFORMAT)); } public interface EncoderDecoder { @@ -265,21 +265,21 @@ private static URL checkConfigFile(File f) { * be recreated so that the change will take effect. */ public static final HiveConf.ConfVars[] metaVars = { - HiveConf.ConfVars.METASTOREWAREHOUSE, - HiveConf.ConfVars.REPLDIR, - HiveConf.ConfVars.METASTOREURIS, - HiveConf.ConfVars.METASTORESELECTION, + HiveConf.ConfVars.METASTORE_WAREHOUSE, + HiveConf.ConfVars.REPL_DIR, + HiveConf.ConfVars.METASTORE_URIS, + HiveConf.ConfVars.METASTORE_SELECTION, HiveConf.ConfVars.METASTORE_SERVER_PORT, - HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, - HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, + HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, + HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES, HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME, - HiveConf.ConfVars.METASTOREPWD, - HiveConf.ConfVars.METASTORECONNECTURLHOOK, - HiveConf.ConfVars.METASTORECONNECTURLKEY, - HiveConf.ConfVars.METASTORESERVERMINTHREADS, - HiveConf.ConfVars.METASTORESERVERMAXTHREADS, + HiveConf.ConfVars.METASTORE_PWD, + HiveConf.ConfVars.METASTORE_CONNECT_URL_HOOK, + HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY, + HiveConf.ConfVars.METASTORE_SERVER_MIN_THREADS, + HiveConf.ConfVars.METASTORE_SERVER_MAX_THREADS, HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE, HiveConf.ConfVars.METASTORE_INT_ORIGINAL, HiveConf.ConfVars.METASTORE_INT_ARCHIVED, @@ -315,9 +315,9 @@ private static URL checkConfigFile(File f) { HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX, HiveConf.ConfVars.METASTORE_INIT_HOOKS, HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS, - HiveConf.ConfVars.HMSHANDLERATTEMPTS, - HiveConf.ConfVars.HMSHANDLERINTERVAL, - HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF, + HiveConf.ConfVars.HMS_HANDLER_ATTEMPTS, + HiveConf.ConfVars.HMS_HANDLER_INTERVAL, + HiveConf.ConfVars.HMS_HANDLER_FORCE_RELOAD_CONF, HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN, HiveConf.ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS, HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, @@ -376,16 +376,16 @@ private static URL checkConfigFile(File f) { * for different databases. */ public static final HiveConf.ConfVars[] dbVars = { - HiveConf.ConfVars.HADOOPBIN, - HiveConf.ConfVars.METASTOREWAREHOUSE, - HiveConf.ConfVars.SCRATCHDIR + HiveConf.ConfVars.HADOOP_BIN, + HiveConf.ConfVars.METASTORE_WAREHOUSE, + HiveConf.ConfVars.SCRATCH_DIR }; /** * encoded parameter values are ;-) encoded. 
Use decoder to get ;-) decoded string */ static final HiveConf.ConfVars[] ENCODED_CONF = { - ConfVars.HIVEQUERYSTRING + ConfVars.HIVE_QUERY_STRING }; /** @@ -489,31 +489,31 @@ public static enum ConfVars { // QL execution stuff DFS_XATTR_ONLY_SUPPORTED_ON_RESERVED_NAMESPACE("dfs.xattr.supported.only.on.reserved.namespace", false, "DFS supports xattr only on Reserved Name space (/.reserved/raw)"), - SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""), + SCRIPT_WRAPPER("hive.exec.script.wrapper", null, ""), PLAN("hive.exec.plan", "", ""), - STAGINGDIR("hive.exec.stagingdir", ".hive-staging", + STAGING_DIR("hive.exec.stagingdir", ".hive-staging", "Directory name that will be created inside table locations in order to support HDFS encryption. " + "This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " + "In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."), - SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive", + SCRATCH_DIR("hive.exec.scratchdir", "/tmp/hive", "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " + "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/ is created, " + "with ${hive.scratch.dir.permission}."), - REPLDIR("hive.repl.rootdir","/user/${system:user.name}/repl/", + REPL_DIR("hive.repl.rootdir","/user/${system:user.name}/repl/", "HDFS root dir for all replication dumps."), - REPLCMENABLED("hive.repl.cm.enabled", false, + REPL_CM_ENABLED("hive.repl.cm.enabled", false, "Turn on ChangeManager, so delete files will go to cmrootdir."), - REPLCMDIR("hive.repl.cmrootdir","/user/${system:user.name}/cmroot/", + REPL_CM_DIR("hive.repl.cmrootdir","/user/${system:user.name}/cmroot/", "Root dir for ChangeManager, used for deleted files."), - REPLCMRETIAN("hive.repl.cm.retain","10d", + REPL_CM_RETAIN("hive.repl.cm.retain","10d", new TimeValidator(TimeUnit.DAYS), "Time to retain removed files in cmrootdir."), - REPLCMENCRYPTEDDIR("hive.repl.cm.encryptionzone.rootdir", ".cmroot", + REPL_CM_ENCRYPTED_DIR("hive.repl.cm.encryptionzone.rootdir", ".cmroot", "Root dir for ChangeManager if encryption zones are enabled, used for deleted files."), - REPLCMFALLBACKNONENCRYPTEDDIR("hive.repl.cm.nonencryptionzone.rootdir", + REPL_CM_FALLBACK_NONENCRYPTED_DIR("hive.repl.cm.nonencryptionzone.rootdir", "", "Root dir for ChangeManager for non encrypted paths if hive.repl.cmrootdir is encrypted."), - REPLCMINTERVAL("hive.repl.cm.interval","3600s", + REPL_CM_INTERVAL("hive.repl.cm.interval","3600s", new TimeValidator(TimeUnit.SECONDS), "Interval for cmroot cleanup thread."), REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE("hive.repl.ha.datapath.replace.remote.nameservice", false, @@ -694,93 +694,93 @@ public static enum ConfVars { REPL_STATS_TOP_EVENTS_COUNTS("hive.repl.stats.events.count", 5, "Number of topmost expensive events that needs to be maintained per event type for the replication statistics." 
+ " Maximum permissible limit is 10."), - LOCALSCRATCHDIR("hive.exec.local.scratchdir", + LOCAL_SCRATCH_DIR("hive.exec.local.scratchdir", "${system:java.io.tmpdir}" + File.separator + "${system:user.name}", "Local scratch space for Hive jobs"), DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir", "${system:java.io.tmpdir}" + File.separator + "${hive.session.id}_resources", "Temporary local directory for added resources in the remote file system."), - SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700", + SCRATCH_DIR_PERMISSION("hive.scratch.dir.permission", "700", "The permission for the user specific scratch directories that get created."), - SUBMITVIACHILD("hive.exec.submitviachild", false, ""), - SUBMITLOCALTASKVIACHILD("hive.exec.submit.local.task.via.child", true, + SUBMIT_VIA_CHILD("hive.exec.submitviachild", false, ""), + SUBMIT_LOCAL_TASK_VIA_CHILD("hive.exec.submit.local.task.via.child", true, "Determines whether local tasks (typically mapjoin hashtable generation phase) runs in \n" + "separate JVM (true recommended) or not. \n" + "Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues."), - SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000, + SCRIPT_ERROR_LIMIT("hive.exec.script.maxerrsize", 100000, "Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \n" + "This prevents runaway scripts from filling logs partitions to capacity"), - ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false, + ALLOW_PARTIAL_CONSUMP("hive.exec.script.allow.partial.consumption", false, "When enabled, this option allows a user script to exit successfully without consuming \n" + "all the data from the standard input."), - STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:", + STREAM_REPORTER_PREFIX("stream.stderr.reporter.prefix", "reporter:", "Streaming jobs that log to standard error with this prefix can log counter or status information."), - STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true, + STREAM_REPORTER_ENABLED("stream.stderr.reporter.enabled", true, "Enable consumption of status and counter messages for streaming jobs."), - COMPRESSRESULT("hive.exec.compress.output", false, + COMPRESS_RESULT("hive.exec.compress.output", false, "This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed. \n" + "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"), - COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false, + COMPRESS_INTERMEDIATE("hive.exec.compress.intermediate", false, "This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. 
\n" + "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"), - COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", "", ""), - COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", "", ""), - BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000), + COMPRESS_INTERMEDIATE_CODEC("hive.intermediate.compression.codec", "", ""), + COMPRESS_INTERMEDIATE_TYPE("hive.intermediate.compression.type", "", ""), + BYTES_PER_REDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000), "size per reducer.The default is 256Mb, i.e if the input size is 1G, it will use 4 reducers."), - MAXREDUCERS("hive.exec.reducers.max", 1009, + MAX_REDUCERS("hive.exec.reducers.max", 1009, "max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is\n" + "negative, Hive will use this one as the max number of reducers when automatically determine number of reducers."), - PREEXECHOOKS("hive.exec.pre.hooks", "", + PRE_EXEC_HOOKS("hive.exec.pre.hooks", "", "Comma-separated list of pre-execution hooks to be invoked for each statement. \n" + "A pre-execution hook is specified as the name of a Java class which implements the \n" + "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."), - POSTEXECHOOKS("hive.exec.post.hooks", "", + POST_EXEC_HOOKS("hive.exec.post.hooks", "", "Comma-separated list of post-execution hooks to be invoked for each statement. \n" + "A post-execution hook is specified as the name of a Java class which implements the \n" + "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."), - ONFAILUREHOOKS("hive.exec.failure.hooks", "", + ON_FAILURE_HOOKS("hive.exec.failure.hooks", "", "Comma-separated list of on-failure hooks to be invoked for each statement. \n" + "An on-failure hook is specified as the name of Java class which implements the \n" + "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."), - QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "", + QUERY_REDACTOR_HOOKS("hive.exec.query.redactor.hooks", "", "Comma-separated list of hooks to be invoked for each query which can \n" + "transform the query before it's placed in the job.xml file. Must be a Java class which \n" + "extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."), - CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "", + CLIENT_STATS_PUBLISHERS("hive.client.stats.publishers", "", "Comma-separated list of statistics publishers to be invoked on counters on each job. \n" + "A client stats publisher is specified as the name of a Java class which implements the \n" + "org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface."), - BASICSTATSTASKSMAXTHREADSFACTOR("hive.basic.stats.max.threads.factor", 2, "Determines the maximum number of " + BASIC_STATS_TASKS_MAX_THREADS_FACTOR("hive.basic.stats.max.threads.factor", 2, "Determines the maximum number of " + "threads that can be used for collection of file level statistics. If the value configured is x, then the " + "maximum number of threads that can be used is x multiplied by the number of available processors. 
A value" + " of less than 1, makes stats collection sequential."), - EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"), - EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8, + EXEC_PARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"), + EXEC_PARALLEL_THREAD_NUMBER("hive.exec.parallel.thread.number", 8, "How many jobs at most can be executed in parallel"), @Deprecated - HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", false, + HIVE_SPECULATIVE_EXEC_REDUCERS("hive.mapred.reduce.tasks.speculative.execution", false, "(Deprecated) Whether speculative execution for reducers should be turned on. "), - HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L, + HIVE_COUNTERS_PULL_INTERVAL("hive.exec.counters.pull.interval", 1000L, "The interval with which to poll the JobTracker for the counters the running job. \n" + "The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be."), - DYNAMICPARTITIONING("hive.exec.dynamic.partition", true, + DYNAMIC_PARTITIONING("hive.exec.dynamic.partition", true, "Whether or not to allow dynamic partitions in DML/DDL."), - DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "nonstrict", + DYNAMIC_PARTITIONING_MODE("hive.exec.dynamic.partition.mode", "nonstrict", new StringSet("strict", "nonstrict"), "In strict mode, the user must specify at least one static partition\n" + "in case the user accidentally overwrites all partitions.\n" + "In nonstrict mode all partitions are allowed to be dynamic."), - DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000, + DYNAMIC_PARTITION_MAX_PARTS("hive.exec.max.dynamic.partitions", 1000, "Maximum number of dynamic partitions allowed to be created in total."), - DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100, + DYNAMIC_PARTITION_MAX_PARTS_PER_NODE("hive.exec.max.dynamic.partitions.pernode", 100, "Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."), - DYNAMICPARTITIONCONVERT("hive.exec.dynamic.partition.type.conversion", true, + DYNAMIC_PARTITION_CONVERT("hive.exec.dynamic.partition.type.conversion", true, "Whether to check and cast a dynamic partition column before creating the partition " + "directory. For example, if partition p is type int and we insert string '001', then if " + "this value is true, directory p=1 will be created; if false, p=001"), - MAXCREATEDFILES("hive.exec.max.created.files", 100000L, + MAX_CREATED_FILES("hive.exec.max.created.files", 100000L, "Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."), - DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__", + DEFAULT_PARTITION_NAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__", "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" + "This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). 
\n" + "The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."), @@ -841,17 +841,17 @@ public static enum ConfVars { HIVE_IN_REPL_TEST_FILES_SORTED("hive.in.repl.test.files.sorted", false, "internal usage only, set to true if the file listing is required in sorted order during bootstrap load", true), - LOCALMODEAUTO("hive.exec.mode.local.auto", false, + LOCAL_MODE_AUTO("hive.exec.mode.local.auto", false, "Let Hive determine whether to run in local mode automatically"), - LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L, + LOCAL_MODE_MAX_BYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L, "When hive.exec.mode.local.auto is true, input bytes should less than this for local mode."), - LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4, + LOCAL_MODE_MAX_INPUT_FILES("hive.exec.mode.local.auto.input.files.max", 4, "When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."), DROP_IGNORES_NON_EXISTENT("hive.exec.drop.ignorenonexistent", true, "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a nonexistent table/view/function"), - HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"), + HIVE_IGNORE_MAPJOIN_HINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"), HIVE_CONF_LOCKED_LIST("hive.conf.locked.list", "", "Comma separated " + "list of configuration options which are locked and can not be changed at runtime. Warning is logged and the " + @@ -886,28 +886,28 @@ public static enum ConfVars { // Properties with null values are ignored and exist only for the purpose of giving us // a symbolic name to reference in the Hive source code. Properties with non-null // values will override any values set in the underlying Hadoop configuration. - HADOOPBIN("hadoop.bin.path", findHadoopBinary(), "", true), - YARNBIN("yarn.bin.path", findYarnBinary(), "", true), - MAPREDBIN("mapred.bin.path", findMapRedBinary(), "", true), + HADOOP_BIN("hadoop.bin.path", findHadoopBinary(), "", true), + YARN_BIN("yarn.bin.path", findYarnBinary(), "", true), + MAPRED_BIN("mapred.bin.path", findMapRedBinary(), "", true), HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem", "The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"), - MAPREDMAXSPLITSIZE(FileInputFormat.SPLIT_MAXSIZE, 256000000L, "", true), - MAPREDMINSPLITSIZE(FileInputFormat.SPLIT_MINSIZE, 1L, "", true), - MAPREDMINSPLITSIZEPERNODE(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE, 1L, "", true), - MAPREDMINSPLITSIZEPERRACK(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK, 1L, "", true), + MAPRED_MAX_SPLIT_SIZE(FileInputFormat.SPLIT_MAXSIZE, 256000000L, "", true), + MAPRED_MIN_SPLIT_SIZE(FileInputFormat.SPLIT_MINSIZE, 1L, "", true), + MAPRED_MIN_SPLIT_SIZE_PER_NODE(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE, 1L, "", true), + MAPRED_MIN_SPLIT_SIZE_PER_RACK(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK, 1L, "", true), // The number of reduce tasks per job. Hadoop sets this value to 1 by default // By setting this property to -1, Hive will automatically determine the correct // number of reducers. - HADOOPNUMREDUCERS("mapreduce.job.reduces", -1, "", true), + HADOOP_NUM_REDUCERS("mapreduce.job.reduces", -1, "", true), // Metastore stuff. Be sure to update HiveConf.metaVars when you add something here! 
- METASTOREDBTYPE("hive.metastore.db.type", "DERBY", new StringSet("DERBY", "ORACLE", "MYSQL", "MSSQL", "POSTGRES"), + METASTORE_DB_TYPE("hive.metastore.db.type", "DERBY", new StringSet("DERBY", "ORACLE", "MYSQL", "MSSQL", "POSTGRES"), "Type of database used by the metastore. Information schema & JDBCStorageHandler depend on it."), /** * @deprecated Use MetastoreConf.WAREHOUSE */ @Deprecated - METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse", + METASTORE_WAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse", "location of default database for the warehouse"), HIVE_METASTORE_WAREHOUSE_EXTERNAL("hive.metastore.warehouse.external.dir", null, @@ -918,14 +918,14 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.THRIFT_URIS */ @Deprecated - METASTOREURIS("hive.metastore.uris", "", + METASTORE_URIS("hive.metastore.uris", "", "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."), /** * @deprecated Use MetastoreConf.THRIFT_URI_SELECTION */ @Deprecated - METASTORESELECTION("hive.metastore.uri.selection", "RANDOM", + METASTORE_SELECTION("hive.metastore.uri.selection", "RANDOM", new StringSet("SEQUENTIAL", "RANDOM"), "Determines the selection mechanism used by metastore client to connect to remote " + "metastore. SEQUENTIAL implies that the first valid metastore from the URIs specified " + @@ -976,13 +976,13 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.THRIFT_CONNECTION_RETRIES */ @Deprecated - METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3, + METASTORE_THRIFT_CONNECTION_RETRIES("hive.metastore.connect.retries", 3, "Number of retries while opening a connection to metastore"), /** * @deprecated Use MetastoreConf.THRIFT_FAILURE_RETRIES */ @Deprecated - METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1, + METASTORE_THRIFT_FAILURE_RETRIES("hive.metastore.failure.retries", 1, "Number of retries upon failure of Thrift metastore calls"), /** * @deprecated Use MetastoreConf.SERVER_PORT @@ -1016,25 +1016,25 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.PWD */ @Deprecated - METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine", + METASTORE_PWD("javax.jdo.option.ConnectionPassword", "mine", "password to use against metastore database"), /** * @deprecated Use MetastoreConf.CONNECT_URL_HOOK */ @Deprecated - METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "", + METASTORE_CONNECT_URL_HOOK("hive.metastore.ds.connection.url.hook", "", "Name of the hook to use for retrieving the JDO connection URL. 
If empty, the value in javax.jdo.option.ConnectionURL is used"), /** * @deprecated Use MetastoreConf.MULTITHREADED */ @Deprecated - METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true, + METASTORE_MULTI_THREADED("javax.jdo.option.Multithreaded", true, "Set this to true if multiple threads access metastore through JDO concurrently."), /** * @deprecated Use MetastoreConf.CONNECT_URL_KEY */ @Deprecated - METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL", + METASTORE_CONNECT_URL_KEY("javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=metastore_db;create=true", "JDBC connect string for a JDBC metastore.\n" + "To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.\n" + @@ -1050,19 +1050,19 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.HMS_HANDLER_ATTEMPTS */ @Deprecated - HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 10, + HMS_HANDLER_ATTEMPTS("hive.hmshandler.retry.attempts", 10, "The number of times to retry a HMSHandler call if there were a connection error."), /** * @deprecated Use MetastoreConf.HMS_HANDLER_INTERVAL */ @Deprecated - HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "2000ms", + HMS_HANDLER_INTERVAL("hive.hmshandler.retry.interval", "2000ms", new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."), /** * @deprecated Use MetastoreConf.HMS_HANDLER_FORCE_RELOAD_CONF */ @Deprecated - HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false, + HMS_HANDLER_FORCE_RELOAD_CONF("hive.hmshandler.force.reload.conf", false, "Whether to force reloading of the HMSHandler configuration (including\n" + "the connection URL, before the next metastore query that accesses the\n" + "datastore. Once reloaded, this value is reset to false. Used for\n" + @@ -1071,19 +1071,19 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.SERVER_MAX_MESSAGE_SIZE */ @Deprecated - METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024L, + METASTORE_SERVER_MAX_MESSAGE_SIZE("hive.metastore.server.max.message.size", 100*1024*1024L, "Maximum message size in bytes a HMS will accept."), /** * @deprecated Use MetastoreConf.SERVER_MIN_THREADS */ @Deprecated - METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200, + METASTORE_SERVER_MIN_THREADS("hive.metastore.server.min.threads", 200, "Minimum number of worker threads in the Thrift server's pool."), /** * @deprecated Use MetastoreConf.SERVER_MAX_THREADS */ @Deprecated - METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000, + METASTORE_SERVER_MAX_THREADS("hive.metastore.server.max.threads", 1000, "Maximum number of worker threads in the Thrift server's pool."), /** * @deprecated Use MetastoreConf.TCP_KEEP_ALIVE @@ -1729,10 +1729,10 @@ public static enum ConfVars { "alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."), // CLI - CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""), - CLIPRINTCURRENTDB("hive.cli.print.current.db", false, + CLI_IGNORE_ERRORS("hive.cli.errors.ignore", false, ""), + CLI_PRINT_CURRENT_DB("hive.cli.print.current.db", false, "Whether to include the current database in the Hive prompt."), - CLIPROMPT("hive.cli.prompt", "hive", + CLI_PROMPT("hive.cli.prompt", "hive", "Command line prompt configuration value. Other hiveconf can be used in this configuration value. 
\n" + "Variable substitution will only be invoked at the Hive CLI startup."), /** @@ -1744,9 +1744,9 @@ public static enum ConfVars { // Things we log in the jobconf // session identifier - HIVESESSIONID("hive.session.id", "", ""), + HIVE_SESSION_ID("hive.session.id", "", ""), // whether session is running in silent mode or not - HIVESESSIONSILENT("hive.session.silent", false, ""), + HIVE_SESSION_SILENT("hive.session.silent", false, ""), HIVE_LOCAL_TIME_ZONE("hive.local.time.zone", "LOCAL", "Sets the time-zone for displaying and interpreting time stamps. If this property value is set to\n" + @@ -1757,35 +1757,35 @@ public static enum ConfVars { HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false, "Whether to log Hive query, query plan, runtime statistics etc."), - HIVEQUERYSTRING("hive.query.string", "", + HIVE_QUERY_STRING("hive.query.string", "", "Query being executed (might be multiple per a session)"), - HIVEQUERYID("hive.query.id", "", + HIVE_QUERY_ID("hive.query.id", "", "ID for query being executed (might be multiple per a session)"), - HIVEQUERYTAG("hive.query.tag", null, "Tag for the queries in the session. User can kill the queries with the tag " + + HIVE_QUERY_TAG("hive.query.tag", null, "Tag for the queries in the session. User can kill the queries with the tag " + "in another session. Currently there is no tag duplication check, user need to make sure his tag is unique. " + "Also 'kill query' needs to be issued to all HiveServer2 instances to proper kill the queries"), - HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"), + HIVE_JOBNAME_LENGTH("hive.jobname.length", 50, "max jobname length"), // hive jar - HIVEJAR("hive.jar.path", "", + HIVE_JAR("hive.jar.path", "", "The location of hive_cli.jar that is used when submitting jobs in a separate jvm."), - HIVEAUXJARS("hive.aux.jars.path", "", + HIVE_AUX_JARS("hive.aux.jars.path", "", "The location of the plugin jars that contain implementations of user defined functions and serdes."), // reloadable jars - HIVERELOADABLEJARS("hive.reloadable.aux.jars.path", "", + HIVE_RELOADABLE_JARS("hive.reloadable.aux.jars.path", "", "The locations of the plugin jars, which can be a comma-separated folders or jars. Jars can be renewed\n" + "by executing reload command. And these jars can be " + "used as the auxiliary classes like creating a UDF or SerDe."), // hive added files and jars - HIVEADDEDFILES("hive.added.files.path", "", "This an internal parameter."), - HIVEADDEDJARS("hive.added.jars.path", "", "This an internal parameter."), - HIVEADDEDARCHIVES("hive.added.archives.path", "", "This an internal parameter."), - HIVEADDFILESUSEHDFSLOCATION("hive.resource.use.hdfs.location", true, "Reference HDFS based files/jars directly instead of " + HIVE_ADDED_FILES("hive.added.files.path", "", "This an internal parameter."), + HIVE_ADDED_JARS("hive.added.jars.path", "", "This an internal parameter."), + HIVE_ADDED_ARCHIVES("hive.added.archives.path", "", "This an internal parameter."), + HIVE_ADD_FILES_USE_HDFS_LOCATION("hive.resource.use.hdfs.location", true, "Reference HDFS based files/jars directly instead of " + "copy to session based HDFS scratch directory, to make distributed cache more useful."), HIVE_CURRENT_DATABASE("hive.current.database", "", "Database name used by current session. 
Internal usage only.", true), @@ -1795,17 +1795,17 @@ public static enum ConfVars { new TimeValidator(TimeUnit.SECONDS), "How long to run autoprogressor for the script/UDTF operators.\n" + "Set to 0 for forever."), - HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false, + HIVE_SCRIPT_AUTO_PROGRESS("hive.script.auto.progress", false, "Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \n" + "to avoid the task getting killed because of inactivity. Hive sends progress information when the script is \n" + "outputting to stderr. This option removes the need of periodically producing stderr messages, \n" + "but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker."), - HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID", + HIVE_SCRIPT_ID_ENV_VAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID", "Name of the environment variable that holds the unique script operator ID in the user's \n" + "transform function (the custom mapper/reducer that the user has specified in the query)"), - HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false, + HIVE_SCRIPT_TRUNCATE_ENV("hive.script.operator.truncate.env", false, "Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"), - HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist", + HIVE_SCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist", "hive.txn.valid.txns,hive.txn.tables.valid.writeids,hive.txn.valid.writeids,hive.script.operator.env.blacklist,hive.repl.current.table.write.id", "Comma separated list of keys from the configuration file not to convert to environment " + "variables when invoking the script operator"), @@ -1838,16 +1838,16 @@ public static enum ConfVars { "Set the owner of files loaded using load data in managed tables."), @Deprecated - HIVEMAPREDMODE("hive.mapred.mode", null, + HIVE_MAPRED_MODE("hive.mapred.mode", null, "Deprecated; use hive.strict.checks.* settings instead."), - HIVEALIAS("hive.alias", "", ""), - HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"), - HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"), + HIVE_ALIAS("hive.alias", "", ""), + HIVE_MAPSIDE_AGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"), + HIVE_GROUPBY_SKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"), HIVE_JOIN_SHORTCUT_UNMATCHED_ROWS("hive.join.shortcut.unmatched.rows", true, "Enables to shortcut processing of known filtered rows in merge joins. internal use only. may affect correctness"), - HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000, + HIVE_JOIN_EMIT_INTERVAL("hive.join.emit.interval", 1000, "How many rows in the right-most join operand Hive should buffer before emitting the join result."), - HIVEJOINCACHESIZE("hive.join.cache.size", 25000, + HIVE_JOIN_CACHE_SIZE("hive.join.cache.size", 25000, "How many rows in the joining tables (except the streaming table) should be cached in memory."), HIVE_PUSH_RESIDUAL_INNER("hive.join.inner.residual", false, "Whether to push non-equi filter predicates within inner joins. This can improve efficiency in " @@ -1963,31 +1963,31 @@ public static enum ConfVars { // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row, // need to remove by hive .13. 
Also, do not change default (see SMB operator) - HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""), + HIVE_MAPJOIN_BUCKET_CACHE_SIZE("hive.mapjoin.bucket.cache.size", 100, ""), - HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true, + HIVE_MAPJOIN_USE_OPTIMIZED_TABLE("hive.mapjoin.optimized.hashtable", true, "Whether Hive should use memory-optimized hash table for MapJoin.\n" + "Only works on Tez because memory-optimized hashtable cannot be serialized."), - HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent", + HIVE_MAPJOIN_OPTIMIZED_TABLE_PROBE_PERCENT("hive.mapjoin.optimized.hashtable.probe.percent", (float) 0.5, "Probing space percentage of the optimized hashtable"), - HIVEMAPJOINPARALELHASHTABLETHREADS("hive.mapjoin.hashtable.load.threads", 2, + HIVE_MAPJOIN_PARALEL_HASHTABLE_THREADS("hive.mapjoin.hashtable.load.threads", 2, "Number of threads used to load records from a broadcast edge in HT used for MJ"), - HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", false, "Whether to use hybrid" + + HIVE_USE_HYBRIDGRACE_HASHJOIN("hive.mapjoin.hybridgrace.hashtable", false, "Whether to use hybrid" + "grace hash join as the join method for mapjoin. Tez only."), - HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " + + HIVE_HYBRIDGRACE_HASHJOIN_MEMCHECK_FREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " + "hybrid grace hash join, how often (how many rows apart) we check if memory is full. " + "This number should be power of 2."), - HIVEHYBRIDGRACEHASHJOINMINWBSIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace" + + HIVE_HYBRIDGRACE_HASHJOIN_MIN_WB_SIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace" + "Hash join, the minimum write buffer size used by optimized hashtable. Default is 512 KB."), - HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For" + + HIVE_HYBRIDGRACE_HASHJOIN_MIN_NUM_PARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For" + "Hybrid grace hash join, the minimum number of partitions to create."), - HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024, + HIVE_HASHTABLE_WB_SIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024, "Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" + "store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" + "joins unnecessary memory will be allocated and then trimmed."), - HIVEHYBRIDGRACEHASHJOINBLOOMFILTER("hive.mapjoin.hybridgrace.bloomfilter", true, "Whether to " + + HIVE_HYBRIDGRACE_HASHJOIN_BLOOMFILTER("hive.mapjoin.hybridgrace.bloomfilter", true, "Whether to " + "use BloomFilter in Hybrid grace hash join to minimize unnecessary spilling."), - HIVEMAPJOINFULLOUTER("hive.mapjoin.full.outer", true, + HIVE_MAPJOIN_FULL_OUTER("hive.mapjoin.full.outer", true, "Whether to use MapJoin for FULL OUTER JOINs."), HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE( "hive.test.mapjoin.full.outer.override", @@ -1996,27 +1996,27 @@ public static enum ConfVars { "setting. 
Using enable will force it on and disable will force it off.\n" + "The default none is do nothing, of course", true), - HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000, + HIVE_SMBJOIN_CACHE_ROWS("hive.smbjoin.cache.rows", 10000, "How many rows with the same key value should be cached in memory per smb joined table."), - HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000, + HIVE_GROUPBY_MAP_INTERVAL("hive.groupby.mapaggr.checkinterval", 100000, "Number of rows after which size of the grouping keys/aggregation classes is performed"), - HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5, + HIVE_MAP_AGGR_HASH_MEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5, "Portion of total memory to be used by map-side group aggregation hash table"), - HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3, + HIVE_MAPJOIN_FOLLOWEDBY_MAP_AGGR_HASH_MEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3, "Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"), - HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9, + HIVE_MAP_AGGR_MEMORY_THRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9, "The max memory to be used by map-side group aggregation hash table.\n" + "If the memory usage is higher than this number, force to flush data"), - HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.99, + HIVE_MAP_AGGR_HASH_MIN_REDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.99, "Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. \n" + "Set to 1 to make sure hash aggregation is never turned off."), - HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND("hive.map.aggr.hash.min.reduction.lower.bound", (float) 0.4, + HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND("hive.map.aggr.hash.min.reduction.lower.bound", (float) 0.4, "Lower bound of Hash aggregate reduction filter. See also: hive.map.aggr.hash.min.reduction"), - HIVEMAPAGGRHASHMINREDUCTIONSTATSADJUST("hive.map.aggr.hash.min.reduction.stats", true, + HIVE_MAP_AGGR_HASH_MIN_REDUCTION_STATS_ADJUST("hive.map.aggr.hash.min.reduction.stats", true, "Whether the value for hive.map.aggr.hash.min.reduction should be set statically using stats estimates. \n" + "If this is enabled, the default value for hive.map.aggr.hash.min.reduction is only used as an upper-bound\n" + "for the value set in the map-side group by operators."), - HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true, + HIVE_MULTI_GROUPBY_SINGLE_REDUCER("hive.multigroupby.singlereducer", true, "Whether to optimize multi group by query to generate single M/R job plan. If the multi group by query has \n" + "common group by keys, it will be optimized to generate single M/R job."), HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", false, @@ -2056,14 +2056,14 @@ public static enum ConfVars { "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."), // for hive udtf operator - HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false, + HIVE_UDTF_AUTO_PROGRESS("hive.udtf.auto.progress", false, "Whether Hive should automatically send progress information to TaskTracker \n" + "when using UDTF's to prevent the task getting killed because of inactivity. 
Users should be cautious \n" + "because this may prevent TaskTracker from killing tasks with infinite loops."), - HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC", "parquet"), + HIVE_DEFAULT_FILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC", "parquet"), "Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"), - HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none", + HIVE_DEFAULT_MANAGED_FILEFORMAT("hive.default.fileformat.managed", "none", new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC", "parquet"), "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" + "created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" + @@ -2072,17 +2072,17 @@ public static enum ConfVars { "Default storage handler class for CREATE TABLE statements. If this is set to a valid class, a 'CREATE TABLE ... STORED AS ... LOCATION ...' command will " + "be equivalent to 'CREATE TABLE ... STORED BY [default.storage.handler.class] LOCATION ...'. Any STORED AS clauses will be ignored, given that STORED BY and STORED AS are " + "incompatible within the same command. Users can explicitly override the default class by issuing 'CREATE TABLE ... STORED BY [overriding.storage.handler.class] ...'"), - HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", ResultFileFormat.SEQUENCEFILE.toString(), + HIVE_QUERY_RESULT_FILEFORMAT("hive.query.result.fileformat", ResultFileFormat.SEQUENCEFILE.toString(), new StringSet(ResultFileFormat.getValidSet()), "Default file format for storing result of the query."), - HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"), + HIVE_CHECK_FILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"), // default serde for rcfile - HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde", + HIVE_DEFAULT_RCFILE_SERDE("hive.default.rcfile.serde", "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe", "The default SerDe Hive will use for the RCFile format"), - HIVEDEFAULTSERDE("hive.default.serde", + HIVE_DEFAULT_SERDE("hive.default.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", "The default SerDe Hive will use for storage formats that do not specify a SerDe."), @@ -2090,7 +2090,7 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.SERDES_USING_METASTORE_FOR_SCHEMA */ @Deprecated - SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema", + SERDES_USING_METASTORE_FOR_SCHEMA("hive.serdes.using.metastore.for.schema", "org.apache.hadoop.hive.ql.io.orc.OrcSerde," + "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," + "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," + @@ -2105,9 +2105,9 @@ public static enum ConfVars { HIVE_LEGACY_SCHEMA_FOR_ALL_SERDES("hive.legacy.schema.for.all.serdes", false, "A backward compatibility setting for external metastore users that do not handle \n" + - SERDESUSINGMETASTOREFORSCHEMA.varname + " correctly. This may be removed at any time."), + SERDES_USING_METASTORE_FOR_SCHEMA.varname + " correctly. 
This may be removed at any time."), - HIVEHISTORYFILELOC("hive.querylog.location", + HIVE_HISTORY_FILE_LOC("hive.querylog.location", "${system:java.io.tmpdir}" + File.separator + "${system:user.name}", "Location of Hive run time structured log file"), @@ -2126,26 +2126,26 @@ public static enum ConfVars { "logged less frequently than specified.\n" + "This only has an effect if hive.querylog.enable.plan.progress is set to true."), - HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", + HIVE_SCRIPT_SERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", "The default SerDe for transmitting input data to and reading output data from the user scripts. "), - HIVESCRIPTRECORDREADER("hive.script.recordreader", + HIVE_SCRIPT_RECORD_READER("hive.script.recordreader", "org.apache.hadoop.hive.ql.exec.TextRecordReader", "The default record reader for reading data from the user scripts. "), - HIVESCRIPTRECORDWRITER("hive.script.recordwriter", + HIVE_SCRIPT_RECORD_WRITER("hive.script.recordwriter", "org.apache.hadoop.hive.ql.exec.TextRecordWriter", "The default record writer for writing data to the user scripts. "), - HIVESCRIPTESCAPE("hive.transform.escape.input", false, + HIVE_SCRIPT_ESCAPE("hive.transform.escape.input", false, "This adds an option to escape special chars (newlines, carriage returns and\n" + "tabs) when they are passed to the user script. This is useful if the Hive tables\n" + "can contain data that contains special characters."), - HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000, + HIVE_BINARY_RECORD_MAX("hive.binary.record.max.length", 1000, "Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \n" + "The last record before the end of stream can have less than hive.binary.record.max.length bytes"), - HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"), + HIVE_HADOOP_MAX_MEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"), //small table file size - HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L, + HIVE_SMALL_TABLES_FILESIZE("hive.mapjoin.smalltable.filesize", 25000000L, "The threshold for the input file size of the small tables; if the file size is smaller \n" + "than this threshold, it will try to convert the common join into map join"), @@ -2169,53 +2169,53 @@ public static enum ConfVars { "If true, VectorizedOrcAcidRowBatchReader will compute min/max " + "ROW__ID for the split and only load delete events in that range.\n" ), - HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0, + HIVE_SAMPLE_RANDOM_NUM("hive.sample.seednumber", 0, "A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."), // test mode in hive mode - HIVETESTMODE("hive.test.mode", false, + HIVE_TEST_MODE("hive.test.mode", false, "Whether Hive is running in test mode. 
If yes, it turns on sampling and prefixes the output tablename.", false), - HIVEEXIMTESTMODE("hive.exim.test.mode", false, + HIVE_EXIM_TEST_MODE("hive.exim.test.mode", false, "The subset of test mode that only enables custom path handling for ExIm.", false), - HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_", + HIVE_TEST_MODE_PREFIX("hive.test.mode.prefix", "test_", "In test mode, specifies prefixes for the output table", false), - HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32, + HIVE_TEST_MODE_SAMPLE_FREQ("hive.test.mode.samplefreq", 32, "In test mode, specifies sampling frequency for table, which is not bucketed,\n" + "For example, the following query:\n" + " INSERT OVERWRITE TABLE dest SELECT col1 from src\n" + "would be converted to\n" + " INSERT OVERWRITE TABLE test_dest\n" + " SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))", false), - HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", "", + HIVE_TEST_MODE_NOSAMPLE("hive.test.mode.nosamplelist", "", "In test mode, specifies comma separated table names which would not apply sampling", false), - HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false), - HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", "", "internal variable for test", false), - HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false), - HIVETESTMODEROLLBACKTXN("hive.test.rollbacktxn", false, "For testing only. Will mark every ACID transaction aborted", false), - HIVETESTMODEFAILCOMPACTION("hive.test.fail.compaction", false, "For testing only. Will cause CompactorMR to fail.", false), - HIVETESTMODEFAILLOADDYNAMICPARTITION("hive.test.fail.load.dynamic.partition", false, "For testing only. Will cause loadDynamicPartition to fail.", false), - HIVETESTMODEFAILHEARTBEATER("hive.test.fail.heartbeater", false, "For testing only. Will cause Heartbeater to fail.", false), + HIVE_TEST_MODE_DUMMY_STAT_AGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false), + HIVE_TEST_MODE_DUMMY_STAT_PUB("hive.test.dummystats.publisher", "", "internal variable for test", false), + HIVE_TEST_CURRENT_TIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false), + HIVE_TEST_MODE_ROLLBACK_TXN("hive.test.rollbacktxn", false, "For testing only. Will mark every ACID transaction aborted", false), + HIVE_TEST_MODE_FAIL_COMPACTION("hive.test.fail.compaction", false, "For testing only. Will cause CompactorMR to fail.", false), + HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION("hive.test.fail.load.dynamic.partition", false, "For testing only. Will cause loadDynamicPartition to fail.", false), + HIVE_TEST_MODE_FAIL_HEARTBEATER("hive.test.fail.heartbeater", false, "For testing only. Will cause Heartbeater to fail.", false), TESTMODE_BUCKET_CODEC_VERSION("hive.test.bucketcodec.version", 1, "For testing only. Will make ACID subsystem write RecordIdentifier.bucketId in specified\n" + "format", false), HIVE_EXTEND_BUCKET_ID_RANGE("hive.extend.bucketid.range", true, "Dynamically allocate some bits from statement id when bucket id overflows. This allows having more than 4096 buckets."), - HIVETESTMODEACIDKEYIDXSKIP("hive.test.acid.key.index.skip", false, "For testing only. OrcRecordUpdater will skip " + HIVE_TEST_MODE_ACID_KEY_IDX_SKIP("hive.test.acid.key.index.skip", false, "For testing only. 
OrcRecordUpdater will skip " + "generation of the hive.acid.key.index", false), - HIVEMERGEMAPFILES("hive.merge.mapfiles", true, + HIVE_MERGE_MAPFILES("hive.merge.mapfiles", true, "Merge small files at the end of a map-only job"), - HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false, + HIVE_MERGE_MAPRED_FILES("hive.merge.mapredfiles", false, "Merge small files at the end of a map-reduce job"), - HIVEMERGETEZFILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"), - HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000), + HIVE_MERGE_TEZ_FILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"), + HIVE_MERGE_MAP_FILES_SIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000), "Size of merged files at the end of the job"), - HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000), + HIVE_MERGE_MAP_FILES_AVG_SIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000), "When the average output file size of a job is less than this number, Hive will start an additional \n" + "map-reduce job to merge the output files into bigger files. This is only done for map-only jobs \n" + "if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."), - HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""), - HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true, + HIVE_MERGE_RCFILE_BLOCK_LEVEL("hive.merge.rcfile.block.level", true, ""), + HIVE_MERGE_ORC_FILE_STRIPE_LEVEL("hive.merge.orcfile.stripe.level", true, "When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled\n" + "while writing a table with ORC file format, enabling this config will do stripe-level\n" + "fast merge for small ORC files. Note that enabling this config will not honor the\n" + @@ -2233,11 +2233,11 @@ public static enum ConfVars { HIVE_ICEBERG_ALLOW_DATAFILES_IN_TABLE_LOCATION_ONLY("hive.iceberg.allow.datafiles.in.table.location.only", false, "If this is set to true, then all the data files being read should be withing the table location"), - HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true, + HIVE_USE_EXPLICIT_RCFILE_HEADER("hive.exec.rcfile.use.explicit.header", true, "If this is set the header for RCFiles will simply be RCF. If this is not\n" + "set the header will be that borrowed from sequence files, e.g. SEQ- followed\n" + "by the input and output RCFile formats."), - HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true, ""), + HIVE_USE_RCFILE_SYNC_CACHE("hive.exec.rcfile.use.sync.cache", true, ""), HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE, ""), HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0, ""), @@ -2373,109 +2373,109 @@ public static enum ConfVars { "The default is false, which means only 'TRUE' and 'FALSE' are treated as legal\n" + "boolean literal."), - HIVESKEWJOIN("hive.optimize.skewjoin", false, + HIVE_SKEW_JOIN("hive.optimize.skewjoin", false, "Whether to enable skew join optimization. \n" + "The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\n" + "processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\n" + "job, process those skewed keys. 
The same key need not be skewed for all the tables, and so,\n" + "the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\n" + "map-join."), - HIVEDYNAMICPARTITIONHASHJOIN("hive.optimize.dynamic.partition.hashjoin", false, + HIVE_DYNAMIC_PARTITION_HASHJOIN("hive.optimize.dynamic.partition.hashjoin", false, "Whether to enable dynamically partitioned hash join optimization. \n" + "This setting is also dependent on enabling hive.auto.convert.join"), - HIVECONVERTJOIN("hive.auto.convert.join", true, + HIVE_CONVERT_JOIN("hive.auto.convert.join", true, "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size"), - HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true, + HIVE_CONVERT_JOIN_NOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true, "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. \n" + "If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n" + "specified size, the join is directly converted to a mapjoin (there is no conditional task)."), HIVE_CONVERT_ANTI_JOIN("hive.auto.convert.anti.join", true, "Whether Hive enables the optimization about converting join with null filter to anti join."), - HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size", + HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD("hive.auto.convert.join.noconditionaltask.size", 10000000L, "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" + "However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, \n" + "the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB"), - HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", false, + HIVE_CONVERT_JOIN_USE_NONSTAGED("hive.auto.convert.join.use.nonstaged", false, "For conditional joins, if input stream from a small alias can be directly applied to join operator without \n" + "filtering or projection, the alias need not to be pre-staged in distributed cache via mapred local task.\n" + "Currently, this is not working with vectorization or tez execution engine."), - HIVESKEWJOINKEY("hive.skewjoin.key", 100000, + HIVE_SKEWJOIN_KEY("hive.skewjoin.key", 100000, "Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator,\n" + "we think the key as a skew join key. "), - HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000, + HIVE_SKEWJOIN_MAPJOIN_NUM_MAP_TASK("hive.skewjoin.mapjoin.map.tasks", 10000, "Determine the number of map task used in the follow up map join job for a skew join.\n" + "It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine-grained control."), - HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L, + HIVE_SKEWJOIN_MAPJOIN_MIN_SPLIT("hive.skewjoin.mapjoin.min.split", 33554432L, "Determine the number of map task at most used in the follow up map join job for a skew join by specifying \n" + "the minimum split size. 
It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine-grained control."), - HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000, + HIVE_SEND_HEARTBEAT("hive.heartbeat.interval", 1000, "Send a heartbeat after this interval - used by mapjoin and filter operators"), - HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L, + HIVE_LIMIT_MAX_ROW_SIZE("hive.limit.row.max.size", 100000L, "When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least."), - HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10, - "When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."), - HIVELIMITOPTENABLE("hive.limit.optimize.enable", false, + HIVE_LIMIT_OPT_LIMIT_FILE("hive.limit.optimize.limit.file", 10, + "When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."), + HIVE_LIMIT_OPT_ENABLE("hive.limit.optimize.enable", false, "Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first."), - HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000, + HIVE_LIMIT_OPT_MAX_FETCH("hive.limit.optimize.fetch.max", 50000, "Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. \n" + "Insert queries are not restricted by this limit."), - HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(), + HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(), "The fraction of available memory to be used for buffering rows in Reducesink operator for limit pushdown optimization."), - HIVECONVERTJOINMAXENTRIESHASHTABLE("hive.auto.convert.join.hashtable.max.entries", 21000000L, + HIVE_CONVERT_JOIN_MAX_ENTRIES_HASHTABLE("hive.auto.convert.join.hashtable.max.entries", 21000000L, "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" + "However, if it is on, and the predicted number of entries in hashtable for a given join \n" + "input is larger than this number, the join will not be converted to a mapjoin. \n" + "The value \"-1\" means no limit."), - XPRODSMALLTABLEROWSTHRESHOLD("hive.xprod.mapjoin.small.table.rows", 1,"Maximum number of rows on build side" + XPROD_SMALL_TABLE_ROWS_THRESHOLD("hive.xprod.mapjoin.small.table.rows", 1,"Maximum number of rows on build side" + " of map join before it switches over to cross product edge"), - HIVECONVERTJOINMAXSHUFFLESIZE("hive.auto.convert.join.shuffle.max.size", 10000000000L, + HIVE_CONVERT_JOIN_MAX_SHUFFLE_SIZE("hive.auto.convert.join.shuffle.max.size", 10000000000L, "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" + "However, if it is on, and the predicted size of the larger input for a given join is greater \n" + "than this number, the join will not be converted to a dynamically partitioned hash join. \n" + "The value \"-1\" means no limit."), - HIVEHASHTABLEKEYCOUNTADJUSTMENT("hive.hashtable.key.count.adjustment", 0.99f, + HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT("hive.hashtable.key.count.adjustment", 0.99f, "Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate" + " of the number of keys is divided by this value. 
If the value is 0, statistics are not used" + "and hive.hashtable.initialCapacity is used instead."), - HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " + + HIVE_HASHTABLE_THRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " + "mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0"), - HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""), - HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55, + HIVE_HASHTABLE_LOAD_FACTOR("hive.hashtable.loadfactor", (float) 0.75, ""), + HIVE_HASHTABLE_FOLLOWBY_GBY_MAX_MEMORY_USAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55, "This number means how much memory the local task can take to hold the key/value into an in-memory hash table \n" + "when this map join is followed by a group by. If the local task's memory usage is more than this number, \n" + "the local task will abort by itself. It means the data of the small table is too large " + "to be held in memory."), - HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90, + HIVE_HASHTABLE_MAX_MEMORY_USAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90, "This number means how much memory the local task can take to hold the key/value into an in-memory hash table. \n" + "If the local task's memory usage is more than this number, the local task will abort by itself. \n" + "It means the data of the small table is too large to be held in memory."), - HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000, + HIVE_HASHTABLE_SCALE("hive.mapjoin.check.memory.rows", (long)100000, "The number means after how many rows processed it needs to check the memory usage"), - HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat", + HIVE_INPUT_FORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat", "The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat."), - HIVETEZINPUTFORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat", + HIVE_TEZ_INPUT_FORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat", "The default input format for tez. Tez groups splits in the AM."), - HIVETEZCONTAINERSIZE("hive.tez.container.size", -1, + HIVE_TEZ_CONTAINER_SIZE("hive.tez.container.size", -1, "By default Tez will spawn containers of the size of a mapper. This can be used to overwrite."), - HIVETEZCPUVCORES("hive.tez.cpu.vcores", -1, + HIVE_TEZ_CPU_VCORES("hive.tez.cpu.vcores", -1, "By default Tez will ask for however many cpus map-reduce is configured to use per container.\n" + "This can be used to overwrite."), - HIVETEZJAVAOPTS("hive.tez.java.opts", null, + HIVE_TEZ_JAVA_OPTS("hive.tez.java.opts", null, "By default Tez will use the Java options from map tasks. This can be used to overwrite."), - HIVETEZLOGLEVEL("hive.tez.log.level", "INFO", + HIVE_TEZ_LOG_LEVEL("hive.tez.log.level", "INFO", "The log level to use for tasks executing as part of the DAG.\n" + "Used only if hive.tez.java.opts is used to configure Java options."), - HIVETEZHS2USERACCESS("hive.tez.hs2.user.access", true, + HIVE_TEZ_HS2_USER_ACCESS("hive.tez.hs2.user.access", true, "Whether to grant access to the hs2/hive user for queries"), - HIVEQUERYNAME ("hive.query.name", null, + HIVE_QUERY_NAME("hive.query.name", null, "This named is used by Tez to set the dag name. 
This name in turn will appear on \n" + "the Tez UI representing the work that was done."), - HIVETEZJOBNAME("tez.job.name", "HIVE-%s", + HIVE_TEZ_JOB_NAME("tez.job.name", "HIVE-%s", "This named is used by Tez to set the job name. This name in turn will appear on \n" + "the Yarn UI representing the Yarn Application Name. And The job name may be a \n" + "Java String.format() string, to which the session ID will be supplied as the single parameter."), @@ -2491,15 +2491,15 @@ public static enum ConfVars { "This time slice should align with the flush interval of the logging library else file pruning may\n" + "incorrectly prune files leading to incorrect results from sys.logs table."), - HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true, + HIVE_OPTIMIZE_BUCKETING_SORTING("hive.optimize.bucketingsorting", true, "Don't create a reducer for enforcing \n" + "bucketing/sorting for queries of the form: \n" + "insert overwrite table T2 select * from T1;\n" + "where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."), - HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""), - HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false, + HIVE_PARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""), + HIVE_ENFORCE_SORT_MERGE_BUCKET_MAPJOIN("hive.enforce.sortmergebucketmapjoin", false, "If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?"), - HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false, + HIVE_ENFORCE_BUCKET_MAPJOIN("hive.enforce.bucketmapjoin", false, "If the user asked for bucketed map-side join, and it cannot be performed, \n" + "should the query fail or not ? For example, if the buckets in the tables being joined are\n" + "not a multiple of each other, bucketed map-side join cannot be performed, and the\n" + @@ -2539,24 +2539,24 @@ public static enum ConfVars { "with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster\n" + "if the complete small table can fit in memory, and a map-join can be performed."), - HIVESCRIPTOPERATORTRUST("hive.exec.script.trust", false, ""), - HIVEROWOFFSET("hive.exec.rowoffset", false, + HIVE_SCRIPT_OPERATOR_TRUST("hive.exec.script.trust", false, ""), + HIVE_ROW_OFFSET("hive.exec.rowoffset", false, "Whether to provide the row offset virtual column"), // Optimizer - HIVEOPTINDEXFILTER("hive.optimize.index.filter", true, "Whether to enable automatic use of indexes"), + HIVE_OPT_INDEX_FILTER("hive.optimize.index.filter", true, "Whether to enable automatic use of indexes"), - HIVEOPTPPD("hive.optimize.ppd", true, + HIVE_OPT_PPD("hive.optimize.ppd", true, "Whether to enable predicate pushdown"), - HIVEOPTPPD_WINDOWING("hive.optimize.ppd.windowing", true, + HIVE_OPT_PPD_WINDOWING("hive.optimize.ppd.windowing", true, "Whether to enable predicate pushdown through windowing"), - HIVEPPDRECOGNIZETRANSITIVITY("hive.ppd.recognizetransivity", true, + HIVE_PPD_RECOGNIZE_TRANSITIVITY("hive.ppd.recognizetransivity", true, "Whether to transitively replicate predicate filters over equijoin conditions."), - HIVEPPD_RECOGNIZE_COLUMN_EQUALITIES("hive.ppd.recognize.column.equalities", true, + HIVE_PPD_RECOGNIZE_COLUMN_EQUALITIES("hive.ppd.recognize.column.equalities", true, "Whether we should traverse the join branches to discover transitive propagation opportunities over" + " equijoin conditions. 
\n" + "Requires hive.ppd.recognizetransivity to be set to true."), - HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true, + HIVE_PPD_REMOVE_DUPLICATE_FILTERS("hive.ppd.remove.duplicatefilters", true, "During query optimization, filters may be pushed down in the operator tree. \n" + "If this config is true only pushed down filters remain in the operator tree, \n" + "and the original filter is removed. If this config is false, the original filter \n" + @@ -2565,44 +2565,44 @@ public static enum ConfVars { true, "Whether to transitively infer disjunctive predicates across joins. \n" + "Disjunctive predicates are hard to simplify and pushing them down might lead to infinite rule matching " + "causing stackoverflow and OOM errors"), - HIVEPOINTLOOKUPOPTIMIZER("hive.optimize.point.lookup", true, + HIVE_POINT_LOOKUP_OPTIMIZER("hive.optimize.point.lookup", true, "Whether to transform OR clauses in Filter operators into IN clauses"), - HIVEPOINTLOOKUPOPTIMIZERMIN("hive.optimize.point.lookup.min", 2, + HIVE_POINT_LOOKUP_OPTIMIZER_MIN("hive.optimize.point.lookup.min", 2, "Minimum number of OR clauses needed to transform into IN clauses"), HIVEOPT_TRANSFORM_IN_MAXNODES("hive.optimize.transform.in.maxnodes", 16, "Maximum number of IN expressions beyond which IN will not be transformed into OR clause"), - HIVECOUNTDISTINCTOPTIMIZER("hive.optimize.countdistinct", true, + HIVE_COUNT_DISTINCT_OPTIMIZER("hive.optimize.countdistinct", true, "Whether to transform count distinct into two stages"), - HIVEPARTITIONCOLUMNSEPARATOR("hive.optimize.partition.columns.separate", true, + HIVE_PARTITION_COLUMN_SEPARATOR("hive.optimize.partition.columns.separate", true, "Extract partition columns from IN clauses"), // Constant propagation optimizer - HIVEOPTCONSTANTPROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"), - HIVEIDENTITYPROJECTREMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"), - HIVEMETADATAONLYQUERIES("hive.optimize.metadataonly", false, + HIVE_OPT_CONSTANT_PROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"), + HIVE_IDENTITY_PROJECT_REMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"), + HIVE_METADATA_ONLY_QUERIES("hive.optimize.metadataonly", false, "Whether to eliminate scans of the tables from which no columns are selected. Note\n" + "that, when selecting from empty tables with data files, this can produce incorrect\n" + "results, so it's disabled by default. 
It works correctly for normal tables."), - HIVENULLSCANOPTIMIZE("hive.optimize.null.scan", true, "Don't scan relations which are guaranteed to not generate any rows"), - HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true, + HIVE_NULL_SCAN_OPTIMIZE("hive.optimize.null.scan", true, "Don't scan relations which are guaranteed to not generate any rows"), + HIVE_OPT_PPD_STORAGE("hive.optimize.ppd.storage", true, "Whether to push predicates down to storage handlers"), - HIVEOPTGROUPBY("hive.optimize.groupby", true, + HIVE_OPT_GROUPBY("hive.optimize.groupby", true, "Whether to enable the bucketed group by from bucketed partitions/tables."), - HIVEOPTBUCKETMAPJOIN("hive.optimize.bucketmapjoin", false, + HIVE_OPT_BUCKET_MAPJOIN("hive.optimize.bucketmapjoin", false, "Whether to try bucket mapjoin"), - HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false, + HIVE_OPT_SORT_MERGE_BUCKET_MAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false, "Whether to try sorted bucket merge map join"), - HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true, + HIVE_OPT_REDUCE_DEDUPLICATION("hive.optimize.reducededuplication", true, "Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. \n" + "This should always be set to true. Since it is a new feature, it has been made configurable."), - HIVEOPTREDUCEDEDUPLICATIONMINREDUCER("hive.optimize.reducededuplication.min.reducer", 4, + HIVE_OPT_REDUCE_DEDUPLICATION_MIN_REDUCER("hive.optimize.reducededuplication.min.reducer", 4, "Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. \n" + "That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.\n" + "The optimization will be automatically disabled if number of reducers would be less than specified value."), - HIVEOPTJOINREDUCEDEDUPLICATION("hive.optimize.joinreducededuplication", true, + HIVE_OPT_JOIN_REDUCE_DEDUPLICATION("hive.optimize.joinreducededuplication", true, "Remove extra shuffle/sorting operations after join algorithm selection has been executed. \n" + "Currently it only works with Apache Tez. This should always be set to true. 
\n" + "Since it is a new feature, it has been made configurable."), - HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD("hive.optimize.sort.dynamic.partition.threshold", 0, + HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD("hive.optimize.sort.dynamic.partition.threshold", 0, "When enabled dynamic partitioning column will be globally sorted.\n" + "This way we can keep only one record writer open for each partition value\n" + "in the reducer thereby reducing the memory pressure on reducers.\n" + @@ -2613,13 +2613,13 @@ public static enum ConfVars { "Setting it to any other positive integer will make Hive use this as threshold for number of writers."), - HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false, "Uses sampling on order-by clause for parallel execution."), - HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."), - HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(), + HIVE_SAMPLING_FOR_ORDERBY("hive.optimize.sampling.orderby", false, "Uses sampling on order-by clause for parallel execution."), + HIVE_SAMPLING_NUMBER_FOR_ORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."), + HIVE_SAMPLING_PERCENT_FOR_ORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(), "Probability with which a row will be chosen."), HIVE_REMOVE_ORDERBY_IN_SUBQUERY("hive.remove.orderby.in.subquery", true, "If set to true, order/sort by without limit in sub queries will be removed."), - HIVEOPTIMIZEDISTINCTREWRITE("hive.optimize.distinct.rewrite", true, "When applicable this " + HIVE_OPTIMIZE_DISTINCT_REWRITE("hive.optimize.distinct.rewrite", true, "When applicable this " + "optimization rewrites distinct aggregates from a single stage to multi-stage " + "aggregation. This may not be optimal in all cases. Ideally, whether to trigger it or " + "not should be cost based decision. Until Hive formalizes cost model for this, this is config driven."), @@ -2640,7 +2640,7 @@ public static enum ConfVars { "By default, when writing data into a table and UNION ALL is the last step of the query, Hive on Tez will\n" + "create a subdirectory for each branch of the UNION ALL. When this property is enabled,\n" + "the subdirectories are removed, and the files are renamed and moved to the parent directory"), - HIVEOPTCORRELATION("hive.optimize.correlation", false, "exploit intra-query correlations."), + HIVE_OPT_CORRELATION("hive.optimize.correlation", false, "exploit intra-query correlations."), HIVE_OPTIMIZE_LIMIT_TRANSPOSE("hive.optimize.limittranspose", false, "Whether to push a limit through left/right outer join or union. 
If the value is true and the size of the outer\n" + @@ -3549,9 +3549,9 @@ public static enum ConfVars { "This value is only used for a given table if the kudu.master_addresses table property is not set."), // For har files - HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"), + HIVE_ARCHIVE_ENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"), - HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"), + HIVE_FETCH_TASK_CONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"), "Some select queries can be converted to single FETCH task minimizing latency.\n" + "Currently the query should be single sourced not having any subquery and should not have\n" + "any aggregations or distincts (which incurs RS), lateral views and joins.\n" + @@ -3559,30 +3559,30 @@ public static enum ConfVars { "1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only\n" + "2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)" ), - HIVEFETCHTASKCACHING("hive.fetch.task.caching", true, + HIVE_FETCH_TASK_CACHING("hive.fetch.task.caching", true, "Enabling the caching of the result of fetch tasks eliminates the chance of running into a failing read." + " On the other hand, if enabled, the hive.fetch.task.conversion.threshold must be adjusted accordingly. That" + " is 200MB by default which must be lowered in case of enabled caching to prevent the consumption of too much memory."), - HIVEFETCHTASKCONVERSIONTHRESHOLD("hive.fetch.task.conversion.threshold", 209715200L, + HIVE_FETCH_TASK_CONVERSION_THRESHOLD("hive.fetch.task.conversion.threshold", 209715200L, "Input threshold for applying hive.fetch.task.conversion. If target table is native, input length\n" + "is calculated by summation of file lengths. If it's not native, storage handler for the table\n" + "can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface."), - HIVEFETCHTASKAGGR("hive.fetch.task.aggr", false, + HIVE_FETCH_TASK_AGGR("hive.fetch.task.aggr", false, "Aggregation queries with no group-by clause (for example, select count(*) from src) execute\n" + "final aggregations in single reduce task. If this is set true, Hive delegates final aggregation\n" + "stage to fetch task, possibly decreasing the query time."), - HIVEOPTIMIZEMETADATAQUERIES("hive.compute.query.using.stats", true, + HIVE_OPTIMIZE_METADATA_QUERIES("hive.compute.query.using.stats", true, "When set to true Hive will answer a few queries like count(1) purely using stats\n" + "stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.\n" + "For more advanced stats collection need to run analyze table queries."), // Serde for FetchTask - HIVEFETCHOUTPUTSERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe", + HIVE_FETCH_OUTPUT_SERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe", "The SerDe used by FetchTask to serialize the fetch output."), - HIVEEXPREVALUATIONCACHE("hive.cache.expr.evaluation", true, + HIVE_EXPR_EVALUATION_CACHE("hive.cache.expr.evaluation", true, "If true, the evaluation result of a deterministic expression referenced twice or more\n" + "will be cached.\n" + "For example, in a filter condition like '.. 
where key + 10 = 100 or key + 10 = 0'\n" + @@ -3591,12 +3591,12 @@ public static enum ConfVars { "or filter operators."), // Hive Variables - HIVEVARIABLESUBSTITUTE("hive.variable.substitute", true, + HIVE_VARIABLE_SUBSTITUTE("hive.variable.substitute", true, "This enables substitution using syntax like ${var} ${system:var} and ${env:var}."), - HIVEVARIABLESUBSTITUTEDEPTH("hive.variable.substitute.depth", 40, + HIVE_VARIABLE_SUBSTITUTE_DEPTH("hive.variable.substitute.depth", 40, "The maximum replacements the substitution engine will do."), - HIVECONFVALIDATION("hive.conf.validation", true, + HIVE_CONF_VALIDATION("hive.conf.validation", true, "Enables type checking for registered Hive configurations"), SEMANTIC_ANALYZER_HOOK("hive.semantic.analyzer.hook", "", ""), @@ -4551,7 +4551,7 @@ public static enum ConfVars { " it will now take 512 reducers, similarly if the max number of reducers is 511,\n" + " and a job was going to use this many, it will now use 256 reducers."), - HIVEOPTLISTBUCKETING("hive.optimize.listbucketing", false, + HIVE_OPT_LIST_BUCKETING("hive.optimize.listbucketing", false, "Enable list bucketing optimizer. Default value is false so that we disable it by default."), // Allow TCP Keep alive socket option for for HiveServer or a maximum timeout for the socket. @@ -4741,11 +4741,11 @@ public static enum ConfVars { "Whether to generate consistent split locations when generating splits in the AM"), HIVE_PREWARM_ENABLED("hive.prewarm.enabled", false, "Enables container prewarm for Tez(Hadoop 2 only)"), HIVE_PREWARM_NUM_CONTAINERS("hive.prewarm.numcontainers", 10, "Controls the number of containers to prewarm for Tez (Hadoop 2 only)"), - HIVESTAGEIDREARRANGE("hive.stageid.rearrange", "none", new StringSet("none", "idonly", "traverse", "execution"), ""), - HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES("hive.explain.dependency.append.tasktype", false, ""), - HIVEUSEGOOGLEREGEXENGINE("hive.use.googleregex.engine",false,"whether to use google regex engine or not, default regex engine is java.util.regex"), + HIVE_STAGE_ID_REARRANGE("hive.stageid.rearrange", "none", new StringSet("none", "idonly", "traverse", "execution"), ""), + HIVE_EXPLAIN_DEPENDENCY_APPEND_TASK_TYPES("hive.explain.dependency.append.tasktype", false, ""), + HIVE_USE_GOOGLE_REGEX_ENGINE("hive.use.googleregex.engine",false,"whether to use google regex engine or not, default regex engine is java.util.regex"), - HIVECOUNTERGROUP("hive.counters.group.name", "HIVE", + HIVE_COUNTER_GROUP("hive.counters.group.name", "HIVE", "The name of counter group for internal Hive variables (CREATED_FILE, FATAL_ERROR, etc.)"), HIVE_QUOTEDID_SUPPORT("hive.support.quoted.identifiers", "column", @@ -5497,7 +5497,7 @@ public static enum ConfVars { "validated for all SQL operations after every defined interval (default: 500ms) and corresponding action\n" + "defined in the trigger will be taken"), - NWAYJOINREORDER("hive.reorder.nway.joins", true, + N_WAY_JOIN_REORDER("hive.reorder.nway.joins", true, "Runs reordering of tables within single n-way join (i.e.: picks streamtable)"), HIVE_MERGE_NWAY_JOINS("hive.merge.nway.joins", false, "Merge adjacent joins into a single n-way join"), @@ -5583,7 +5583,7 @@ public static enum ConfVars { "hive.rewrite.data.policy", "Comma separated list of configuration options which are immutable at runtime"), HIVE_CONF_HIDDEN_LIST("hive.conf.hidden.list", - METASTOREPWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + METASTORE_PWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + "," + 
HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD.varname + "," + DRUID_METADATA_DB_PASSWORD.varname // Adding the S3 credentials from Hadoop config to be hidden @@ -6366,11 +6366,11 @@ public String getQueryString() { } public static String getQueryString(Configuration conf) { - return getVar(conf, ConfVars.HIVEQUERYSTRING, EncoderDecoderFactory.URL_ENCODER_DECODER); + return getVar(conf, ConfVars.HIVE_QUERY_STRING, EncoderDecoderFactory.URL_ENCODER_DECODER); } public static String getQueryId(Configuration conf) { - return getVar(conf, HiveConf.ConfVars.HIVEQUERYID, ""); + return getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, ""); } public void setQueryString(String query) { @@ -6378,7 +6378,7 @@ public void setQueryString(String query) { } public static void setQueryString(Configuration conf, String query) { - setVar(conf, ConfVars.HIVEQUERYSTRING, query, EncoderDecoderFactory.URL_ENCODER_DECODER); + setVar(conf, ConfVars.HIVE_QUERY_STRING, query, EncoderDecoderFactory.URL_ENCODER_DECODER); } public void logVars(PrintStream ps) { for (ConfVars one : ConfVars.values()) { @@ -6482,7 +6482,7 @@ private void initialize(Class cls) { // if embedded metastore is to be used as per config so far // then this is considered like the metastore server case - String msUri = this.getVar(HiveConf.ConfVars.METASTOREURIS); + String msUri = this.getVar(HiveConf.ConfVars.METASTORE_URIS); // This is hackery, but having hive-common depend on standalone-metastore is really bad // because it will pull all of the metastore code into every module. We need to check that // we aren't using the standalone metastore. If we are, we should treat it the same as a @@ -6539,18 +6539,18 @@ private void initialize(Class cls) { // if the running class was loaded directly (through eclipse) rather than through a // jar then this would be needed if (hiveJar == null) { - hiveJar = this.get(ConfVars.HIVEJAR.varname); + hiveJar = this.get(ConfVars.HIVE_JAR.varname); } if (auxJars == null) { - auxJars = StringUtils.join(FileUtils.getJarFilesByPath(this.get(ConfVars.HIVEAUXJARS.varname), this), ','); + auxJars = StringUtils.join(FileUtils.getJarFilesByPath(this.get(ConfVars.HIVE_AUX_JARS.varname), this), ','); } if (getBoolVar(ConfVars.METASTORE_SCHEMA_VERIFICATION)) { setBoolVar(ConfVars.METASTORE_AUTO_CREATE_ALL, false); } - if (getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) { + if (getBoolVar(HiveConf.ConfVars.HIVE_CONF_VALIDATION)) { List trimmed = new ArrayList(); for (Map.Entry entry : this) { String key = entry.getKey(); @@ -6633,28 +6633,28 @@ private static String[] convertVarsToRegex(String[] paramList) { */ private static final String[] SQL_STD_AUTH_SAFE_VAR_NAMES = new String[] { ConfVars.AGGR_JOIN_TRANSPOSE.varname, - ConfVars.BYTESPERREDUCER.varname, + ConfVars.BYTES_PER_REDUCER.varname, ConfVars.CLIENT_STATS_COUNTERS.varname, ConfVars.CREATE_TABLES_AS_ACID.varname, ConfVars.CREATE_TABLE_AS_EXTERNAL.varname, - ConfVars.DEFAULTPARTITIONNAME.varname, + ConfVars.DEFAULT_PARTITION_NAME.varname, ConfVars.DROP_IGNORES_NON_EXISTENT.varname, - ConfVars.HIVECOUNTERGROUP.varname, - ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname, - ConfVars.HIVEENFORCEBUCKETMAPJOIN.varname, - ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN.varname, - ConfVars.HIVEEXPREVALUATIONCACHE.varname, - ConfVars.HIVEQUERYRESULTFILEFORMAT.varname, - ConfVars.HIVEHASHTABLELOADFACTOR.varname, - ConfVars.HIVEHASHTABLETHRESHOLD.varname, - ConfVars.HIVEIGNOREMAPJOINHINT.varname, - ConfVars.HIVELIMITMAXROWSIZE.varname, - ConfVars.HIVEMAPREDMODE.varname, - 
ConfVars.HIVEMAPSIDEAGGREGATE.varname, - ConfVars.HIVEOPTIMIZEMETADATAQUERIES.varname, - ConfVars.HIVEROWOFFSET.varname, - ConfVars.HIVEVARIABLESUBSTITUTE.varname, - ConfVars.HIVEVARIABLESUBSTITUTEDEPTH.varname, + ConfVars.HIVE_COUNTER_GROUP.varname, + ConfVars.HIVE_DEFAULT_MANAGED_FILEFORMAT.varname, + ConfVars.HIVE_ENFORCE_BUCKET_MAPJOIN.varname, + ConfVars.HIVE_ENFORCE_SORT_MERGE_BUCKET_MAPJOIN.varname, + ConfVars.HIVE_EXPR_EVALUATION_CACHE.varname, + ConfVars.HIVE_QUERY_RESULT_FILEFORMAT.varname, + ConfVars.HIVE_HASHTABLE_LOAD_FACTOR.varname, + ConfVars.HIVE_HASHTABLE_THRESHOLD.varname, + ConfVars.HIVE_IGNORE_MAPJOIN_HINT.varname, + ConfVars.HIVE_LIMIT_MAX_ROW_SIZE.varname, + ConfVars.HIVE_MAPRED_MODE.varname, + ConfVars.HIVE_MAPSIDE_AGGREGATE.varname, + ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES.varname, + ConfVars.HIVE_ROW_OFFSET.varname, + ConfVars.HIVE_VARIABLE_SUBSTITUTE.varname, + ConfVars.HIVE_VARIABLE_SUBSTITUTE_DEPTH.varname, ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME.varname, ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL.varname, ConfVars.HIVE_CHECK_CROSS_PRODUCT.varname, @@ -6695,14 +6695,14 @@ private static String[] convertVarsToRegex(String[] paramList) { ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS.varname, ConfVars.LLAP_ENABLE_GRACE_JOIN_IN_LLAP.varname, ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname, - ConfVars.MAXCREATEDFILES.varname, - ConfVars.MAXREDUCERS.varname, - ConfVars.NWAYJOINREORDER.varname, + ConfVars.MAX_CREATED_FILES.varname, + ConfVars.MAX_REDUCERS.varname, + ConfVars.N_WAY_JOIN_REORDER.varname, ConfVars.OUTPUT_FILE_EXTENSION.varname, ConfVars.SHOW_JOB_FAIL_DEBUG_INFO.varname, ConfVars.TASKLOG_DEBUG_TIMEOUT.varname, - ConfVars.HIVEQUERYID.varname, - ConfVars.HIVEQUERYTAG.varname, + ConfVars.HIVE_QUERY_ID.varname, + ConfVars.HIVE_QUERY_TAG.varname, }; /** @@ -6892,7 +6892,7 @@ public String getAuxJars() { */ public void setAuxJars(String auxJars) { this.auxJars = auxJars; - setVar(this, ConfVars.HIVEAUXJARS, auxJars); + setVar(this, ConfVars.HIVE_AUX_JARS, auxJars); } public URL getHiveDefaultLocation() { @@ -7074,7 +7074,7 @@ public static class StrictChecks { private static String makeMessage(String what, ConfVars setting) { return what + " are disabled for safety reasons. If you know what you are doing, please set " - + setting.varname + " to false and make sure that " + ConfVars.HIVEMAPREDMODE.varname + + + setting.varname + " to false and make sure that " + ConfVars.HIVE_MAPRED_MODE.varname + " is not set to 'strict' to proceed. Note that you may get errors or incorrect " + "results if you make a mistake while using some of the unsafe features."; } @@ -7107,7 +7107,7 @@ public static void checkOffsetWithoutOrderBy(Configuration conf) throws Semantic } private static boolean isAllowed(Configuration conf, ConfVars setting) { - String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, (String)null); + String mode = HiveConf.getVar(conf, ConfVars.HIVE_MAPRED_MODE, (String)null); return (mode != null) ? 
!"strict".equals(mode) : !HiveConf.getBoolVar(conf, setting); } } diff --git a/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java b/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java index 973b9acae278..4ea28227628a 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java +++ b/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java @@ -59,12 +59,12 @@ public String substitute(HiveConf conf, String expr) { if (expr == null) { return expr; } - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEVARIABLESUBSTITUTE)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_VARIABLE_SUBSTITUTE)) { l4j.debug("Substitution is on: " + expr); } else { return expr; } - int depth = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVEVARIABLESUBSTITUTEDEPTH); + int depth = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_VARIABLE_SUBSTITUTE_DEPTH); return substitute(conf, expr, depth); } } diff --git a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index 0968227fff01..4d94e6dae5aa 100644 --- a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -421,7 +421,7 @@ public enum ErrorMsg { REPLACE_UNSUPPORTED_TYPE_CONVERSION(10314, "Replacing columns with unsupported type conversion (from {0} to {1}) for column {2}. SerDe may be incompatible", true), HIVE_GROUPING_SETS_AGGR_NOMAPAGGR_MULTIGBY(10315, "Grouping sets aggregations (with rollups or cubes) are not allowed when " + - "HIVEMULTIGROUPBYSINGLEREDUCER is turned on. Set hive.multigroupby.singlereducer=false if you want to use grouping sets"), + "HIVE_MULTI_GROUPBY_SINGLE_REDUCER is turned on. Set hive.multigroupby.singlereducer=false if you want to use grouping sets"), CANNOT_RETRIEVE_TABLE_METADATA(10316, "Error while retrieving table metadata"), INVALID_AST_TREE(10318, "Internal error : Invalid AST"), ERROR_SERIALIZE_METASTORE(10319, "Error while serializing the metastore objects"), diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java index bff79a98faa5..0e8e4c35fc5d 100644 --- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java +++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java @@ -71,19 +71,19 @@ public void testConfProperties() throws Exception { // checkHiveConf(ConfVars.HADOOPFS.varname, "core-site.xml"); // Make sure non-null-valued ConfVar properties *do* override the Hadoop Configuration - checkHadoopConf(ConfVars.HADOOPNUMREDUCERS.varname, "1"); - checkConfVar(ConfVars.HADOOPNUMREDUCERS, "-1"); - checkHiveConf(ConfVars.HADOOPNUMREDUCERS.varname, "-1"); + checkHadoopConf(ConfVars.HADOOP_NUM_REDUCERS.varname, "1"); + checkConfVar(ConfVars.HADOOP_NUM_REDUCERS, "-1"); + checkHiveConf(ConfVars.HADOOP_NUM_REDUCERS.varname, "-1"); // Non-null ConfVar only defined in ConfVars - checkHadoopConf(ConfVars.HIVESKEWJOINKEY.varname, null); - checkConfVar(ConfVars.HIVESKEWJOINKEY, "100000"); - checkHiveConf(ConfVars.HIVESKEWJOINKEY.varname, "100000"); + checkHadoopConf(ConfVars.HIVE_SKEWJOIN_KEY.varname, null); + checkConfVar(ConfVars.HIVE_SKEWJOIN_KEY, "100000"); + checkHiveConf(ConfVars.HIVE_SKEWJOIN_KEY.varname, "100000"); // ConfVar overridden in in hive-site.xml - checkHadoopConf(ConfVars.HIVETESTMODEDUMMYSTATAGGR.varname, null); - checkConfVar(ConfVars.HIVETESTMODEDUMMYSTATAGGR, ""); - 
checkHiveConf(ConfVars.HIVETESTMODEDUMMYSTATAGGR.varname, "value2"); + checkHadoopConf(ConfVars.HIVE_TEST_MODE_DUMMY_STAT_AGGR.varname, null); + checkConfVar(ConfVars.HIVE_TEST_MODE_DUMMY_STAT_AGGR, ""); + checkHiveConf(ConfVars.HIVE_TEST_MODE_DUMMY_STAT_AGGR.varname, "value2"); //Property defined for hive masking algorithm checkConfVar(ConfVars.HIVE_MASKING_ALGO, "sha256"); @@ -94,7 +94,7 @@ public void testConfProperties() throws Exception { checkHiveConf("test.property1", "value1"); // Test HiveConf property variable substitution in hive-site.xml - checkHiveConf("test.var.hiveconf.property", ConfVars.DEFAULTPARTITIONNAME.getDefaultValue()); + checkHiveConf("test.var.hiveconf.property", ConfVars.DEFAULT_PARTITION_NAME.getDefaultValue()); } @Test @@ -150,7 +150,7 @@ public void testHiddenConfig() throws Exception { } ArrayList hiddenList = Lists.newArrayList( - HiveConf.ConfVars.METASTOREPWD.varname, + HiveConf.ConfVars.METASTORE_PWD.varname, HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname, HiveConf.ConfVars.HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD.varname, "fs.s3.awsSecretAccessKey", @@ -196,7 +196,7 @@ public void testEncodingDecoding() throws UnsupportedEncodingException { HiveConf conf = new HiveConf(); String query = "select blah, '\u0001' from random_table"; conf.setQueryString(query); - Assert.assertEquals(URLEncoder.encode(query, "UTF-8"), conf.get(ConfVars.HIVEQUERYSTRING.varname)); + Assert.assertEquals(URLEncoder.encode(query, "UTF-8"), conf.get(ConfVars.HIVE_QUERY_STRING.varname)); Assert.assertEquals(query, conf.getQueryString()); } diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java index 1d0beaf58678..76b825b26f5c 100644 --- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java +++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java @@ -36,7 +36,7 @@ public class TestHiveConfRestrictList { public void setUp() throws Exception { System.setProperty(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname, - ConfVars.HIVETESTMODEPREFIX.varname); + ConfVars.HIVE_TEST_MODE_PREFIX.varname); conf = new HiveConf(); } @@ -46,7 +46,7 @@ public void setUp() throws Exception { */ @Test public void testRestriction() throws Exception { - verifyRestriction(ConfVars.HIVETESTMODEPREFIX.varname, "foo"); + verifyRestriction(ConfVars.HIVE_TEST_MODE_PREFIX.varname, "foo"); conf.verifyAndSet(ConfVars.HIVE_AM_SPLIT_GENERATION.varname, "false"); } @@ -56,7 +56,7 @@ public void testRestriction() throws Exception { */ @Test public void testMultipleRestrictions() throws Exception { - verifyRestriction(ConfVars.HIVETESTMODEPREFIX.varname, "foo"); + verifyRestriction(ConfVars.HIVE_TEST_MODE_PREFIX.varname, "foo"); verifyRestriction(ConfVars.HIVE_IN_TEST.varname, "true"); } @@ -75,25 +75,25 @@ public void testRestrictList() throws Exception { */ @Test public void testAppendRestriction() throws Exception { - String appendListStr = ConfVars.SCRATCHDIR.varname + "," + - ConfVars.LOCALSCRATCHDIR.varname + "," + - ConfVars.METASTOREURIS.varname; + String appendListStr = ConfVars.SCRATCH_DIR.varname + "," + + ConfVars.LOCAL_SCRATCH_DIR.varname + "," + + ConfVars.METASTORE_URIS.varname; conf.addToRestrictList(appendListStr); // check if the new configs are added to HIVE_CONF_RESTRICTED_LIST String newRestrictList = conf.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST); - assertTrue(newRestrictList.contains(ConfVars.SCRATCHDIR.varname)); - 
assertTrue(newRestrictList.contains(ConfVars.LOCALSCRATCHDIR.varname)); - assertTrue(newRestrictList.contains(ConfVars.METASTOREURIS.varname)); + assertTrue(newRestrictList.contains(ConfVars.SCRATCH_DIR.varname)); + assertTrue(newRestrictList.contains(ConfVars.LOCAL_SCRATCH_DIR.varname)); + assertTrue(newRestrictList.contains(ConfVars.METASTORE_URIS.varname)); // check if the old values are still there in HIVE_CONF_RESTRICTED_LIST - assertTrue(newRestrictList.contains(ConfVars.HIVETESTMODEPREFIX.varname)); + assertTrue(newRestrictList.contains(ConfVars.HIVE_TEST_MODE_PREFIX.varname)); // verify that the new configs are in effect - verifyRestriction(ConfVars.HIVETESTMODEPREFIX.varname, "foo"); + verifyRestriction(ConfVars.HIVE_TEST_MODE_PREFIX.varname, "foo"); verifyRestriction(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname, "foo"); - verifyRestriction(ConfVars.LOCALSCRATCHDIR.varname, "foo"); - verifyRestriction(ConfVars.METASTOREURIS.varname, "foo"); + verifyRestriction(ConfVars.LOCAL_SCRATCH_DIR.varname, "foo"); + verifyRestriction(ConfVars.METASTORE_URIS.varname, "foo"); } private void verifyRestriction(String varName, String newVal) { diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/SampleURLHook.java b/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/SampleURLHook.java index 7e09faf44b8e..4a553d5f9df9 100644 --- a/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/SampleURLHook.java +++ b/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/SampleURLHook.java @@ -33,7 +33,7 @@ public class SampleURLHook implements JDOConnectionURLHook { @Override public String getJdoConnectionUrl(Configuration conf) throws Exception { if (originalUrl == null) { - originalUrl = conf.get(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, ""); + originalUrl = conf.get(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, ""); return "jdbc:derby:;databaseName=target/tmp/junit_metastore_db_blank;create=true"; } else { return originalUrl; diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java index cd35ebd81f71..656fa40c03fa 100644 --- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java +++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java @@ -819,7 +819,7 @@ private List fetchSegmentsMetadata(Path segmentDescriptorDir) throw private String getUniqueId() { if (uniqueId == null) { uniqueId = - Preconditions.checkNotNull(Strings.emptyToNull(HiveConf.getVar(getConf(), HiveConf.ConfVars.HIVEQUERYID)), + Preconditions.checkNotNull(Strings.emptyToNull(HiveConf.getVar(getConf(), HiveConf.ConfVars.HIVE_QUERY_ID)), "Hive query id is null"); } return uniqueId; diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java index fc50e0b9216d..2a2be067125f 100644 --- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java +++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java @@ -101,7 +101,7 @@ public static DruidQueryRecordReader getDruidQueryReader(String druidQueryType) protected HiveDruidSplit[] getInputSplits(Configuration conf) throws IOException { String address = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS); - String queryId = HiveConf.getVar(conf, 
HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); if (StringUtils.isEmpty(address)) { throw new IOException("Druid broker address not specified in configuration"); } diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java index 7d94f1afc996..5040831b9a94 100644 --- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java +++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java @@ -111,7 +111,7 @@ private DataSegment createSegment(String location, Interval interval, String ver Mockito.when(tableMock.getDbName()).thenReturn(DB_NAME); Mockito.when(tableMock.getTableName()).thenReturn(TABLE_NAME); config = new Configuration(); - config.set(String.valueOf(HiveConf.ConfVars.HIVEQUERYID), "hive-" + UUID.randomUUID().toString()); + config.set(String.valueOf(HiveConf.ConfVars.HIVE_QUERY_ID), "hive-" + UUID.randomUUID().toString()); config.set(String.valueOf(HiveConf.ConfVars.DRUID_WORKING_DIR), tableWorkingPath); config.set(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY), new Path(tableWorkingPath, "finalSegmentDir").toString()); diff --git a/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseQueries.java b/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseQueries.java index da69f0887f77..16bdd8cf3d27 100644 --- a/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseQueries.java +++ b/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseQueries.java @@ -105,7 +105,7 @@ public void testRollbackDoesNotDeleteOriginTableWhenCTLTFails() throws CommandPr conf.setBoolVar(HiveConf.ConfVars.HIVE_STRICT_MANAGED_TABLES, true); conf.setBoolVar(HiveConf.ConfVars.CREATE_TABLES_AS_ACID, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_CREATE_TABLES_AS_INSERT_ONLY, true); - conf.setVar(HiveConf.ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT, "ORC"); + conf.setVar(HiveConf.ConfVars.HIVE_DEFAULT_MANAGED_FILEFORMAT, "ORC"); driver = DriverFactory.newDriver(conf); diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java index 1e319b4e7dea..c42e9936f47a 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java @@ -81,7 +81,7 @@ private HCatConstants() { // restrict instantiation public static final String HCAT_TABLE_SCHEMA = "hcat.table.schema"; - public static final String HCAT_METASTORE_URI = HiveConf.ConfVars.METASTOREURIS.varname; + public static final String HCAT_METASTORE_URI = HiveConf.ConfVars.METASTORE_URIS.varname; public static final String HCAT_PERMS = "hcat.perms"; diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java index d786e3c4822e..11e53d94a48b 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.annotation.NoReconnect; -import org.apache.hadoop.hive.shims.ShimLoader; import 
org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.ShutdownHookManager; @@ -327,7 +326,7 @@ static class HiveClientCacheKey { final private int threadId; private HiveClientCacheKey(HiveConf hiveConf, final int threadId) throws IOException, LoginException { - this.metaStoreURIs = hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS); + this.metaStoreURIs = hiveConf.getVar(HiveConf.ConfVars.METASTORE_URIS); ugi = Utils.getUGI(); this.hiveConf = hiveConf; this.threadId = threadId; diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java index cda8770a2c98..ceafabaa6eb0 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java @@ -88,7 +88,7 @@ public DynamicPartitionFileRecordWriterContainer( this.dynamicContexts = new HashMap(); this.dynamicObjectInspectors = new HashMap(); this.dynamicOutputJobInfo = new HashMap(); - this.HIVE_DEFAULT_PARTITION_VALUE = HiveConf.getVar(context.getConfiguration(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); + this.HIVE_DEFAULT_PARTITION_VALUE = HiveConf.getVar(context.getConfiguration(), HiveConf.ConfVars.DEFAULT_PARTITION_NAME); } @Override @@ -149,7 +149,7 @@ protected LocalFileWriter getLocalFileWriter(HCatRecord value) throws IOExceptio throw new HCatException(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS, "Number of dynamic partitions being created " + "exceeds configured max allowable partitions[" + maxDynamicPartitions - + "], increase parameter [" + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname + + "], increase parameter [" + HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS.varname + "] if needed."); } diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java index 476c60e53af5..de9ad252ff24 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java @@ -716,7 +716,7 @@ private void discoverPartitions(JobContext context) throws IOException { + "exceeds configured max allowable partitions[" + maxDynamicPartitions + "], increase parameter [" - + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname + + HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS.varname + "] if needed."); } diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java index 7167bd913e4d..9aaf67e40d84 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java @@ -280,14 +280,14 @@ private static int getMaxDynamicPartitions(HiveConf hConf) { if (HCatConstants.HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED) { maxDynamicPartitions = hConf.getIntVar( - HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS); + HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS); } return maxDynamicPartitions; } private static boolean getHarRequested(HiveConf hConf) { - return 
hConf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED); + return hConf.getBoolVar(HiveConf.ConfVars.HIVE_ARCHIVE_ENABLED); } } diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java index 084185a7ed10..7cbc29d1ec39 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java @@ -45,7 +45,7 @@ public RecordReader createRecordReader(Inpu @Override public List getSplits(JobContext job) throws IOException { HiveConf.setLongVar(job.getConfiguration(), - HiveConf.ConfVars.MAPREDMINSPLITSIZE, SequenceFile.SYNC_INTERVAL); + HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, SequenceFile.SYNC_INTERVAL); return super.getSplits(job); } } diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java index 2b57d8d8ae69..749409a03620 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java @@ -100,15 +100,15 @@ public void setUp() throws Exception { System.setSecurityManager(new NoExitSecurityManager()); Policy.setPolicy(new DerbyPolicy()); - hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES, 3); hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 60, TimeUnit.SECONDS); hcatConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - hcatConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + hcatConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.WAREHOUSE)); - hcatConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + hcatConf.set(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); - hcatConf.set(HiveConf.ConfVars.METASTOREURIS.varname, + hcatConf.set(HiveConf.ConfVars.METASTORE_URIS.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.THRIFT_URIS)); clientWH = new Warehouse(hcatConf); msc = new HiveMetaStoreClient(hcatConf); @@ -200,18 +200,18 @@ private int callHCatCli(String[] args) throws Exception { argsList.add("-Dhive.support.concurrency=false"); argsList .add("-Dhive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); - argsList.add("-D" + HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES.varname + "=3"); - argsList.add("-D" + HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES.varname + "=3"); + argsList.add("-D" + HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES.varname + "=3"); + argsList.add("-D" + HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES.varname + "=3"); argsList.add("-D" + HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.varname + "=60"); - argsList.add("-D" + HiveConf.ConfVars.METASTOREWAREHOUSE.varname + "=" + argsList.add("-D" + HiveConf.ConfVars.METASTORE_WAREHOUSE.varname + "=" + MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.WAREHOUSE)); - argsList.add("-D" + 
HiveConf.ConfVars.METASTORECONNECTURLKEY.varname + "=" + argsList.add("-D" + HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname + "=" + MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); - argsList.add("-D" + HiveConf.ConfVars.METASTOREURIS.varname + "=" + argsList.add("-D" + HiveConf.ConfVars.METASTORE_URIS.varname + "=" + MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.THRIFT_URIS)); argsList.add("-D" + HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname + "=" + HCatSemanticAnalyzer.class.getName()); - argsList.add("-D" + HiveConf.ConfVars.PREEXECHOOKS.varname + "="); - argsList.add("-D" + HiveConf.ConfVars.POSTEXECHOOKS.varname + "="); + argsList.add("-D" + HiveConf.ConfVars.PRE_EXEC_HOOKS.varname + "="); + argsList.add("-D" + HiveConf.ConfVars.POST_EXEC_HOOKS.varname + "="); argsList.add("-D" + HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname + "=false"); argsList.add("-D" + "test.warehouse.dir=" + System.getProperty("test.warehouse.dir")); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java index b5f29f5e40fd..546317ab00ec 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java @@ -65,7 +65,7 @@ public void setUpHCatDriver() throws IOException { hcatConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); - hcatConf.set(HiveConf.ConfVars.HIVEDEFAULTRCFILESERDE.varname, + hcatConf.set(HiveConf.ConfVars.HIVE_DEFAULT_RCFILE_SERDE.varname, "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"); hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java index 63715432b721..73558f92cd71 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java @@ -44,8 +44,8 @@ public void setUp() throws Exception { HiveConf hcatConf = new HiveConf(this.getClass()); hcatConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); - hcatConf.set(ConfVars.PREEXECHOOKS.varname, ""); - hcatConf.set(ConfVars.POSTEXECHOOKS.varname, ""); + hcatConf.set(ConfVars.PRE_EXEC_HOOKS.varname, ""); + hcatConf.set(ConfVars.POST_EXEC_HOOKS.varname, ""); hcatConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java index fe1d8afdc8bd..58772179a82a 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java @@ -83,7 +83,7 @@ public void testCacheHit() throws IOException, MetaException, LoginException { client.close(); // close shouldn't matter // Setting a non important configuration should return the same client only - 
hiveConf.setIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS, 10); + hiveConf.setIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS, 10); HiveClientCache.ICacheableMetaStoreClient client2 = (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf); assertNotNull(client2); assertSame(client, client2); @@ -98,7 +98,7 @@ public void testCacheMiss() throws IOException, MetaException, LoginException { assertNotNull(client); // Set different uri as it is one of the criteria deciding whether to return the same client or not - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, " "); // URIs are checked for string equivalence, even spaces make them different + hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, " "); // URIs are checked for string equivalence, even spaces make them different IMetaStoreClient client2 = cache.get(hiveConf); assertNotNull(client2); assertNotSame(client, client2); @@ -157,7 +157,7 @@ public IMetaStoreClient call() throws IOException, MetaException, LoginException public void testCloseAllClients() throws IOException, MetaException, LoginException { final HiveClientCache cache = new HiveClientCache(1000); HiveClientCache.ICacheableMetaStoreClient client1 = (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, " "); // URIs are checked for string equivalence, even spaces make them different + hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, " "); // URIs are checked for string equivalence, even spaces make them different HiveClientCache.ICacheableMetaStoreClient client2 = (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf); cache.closeAllClientsQuietly(); assertTrue(client1.isClosed()); @@ -227,18 +227,18 @@ public LocalMetaServer() { securityManager = System.getSecurityManager(); System.setSecurityManager(new NoExitSecurityManager()); hiveConf = new HiveConf(TestHiveClientCache.class); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + MS_PORT); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES, 3); hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); } public void start() throws InterruptedException { diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java index cf5ef56b5201..08a941c5e1f1 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java @@ -46,8 +46,8 @@ public static 
IDriver instantiateDriver(MiniCluster cluster) { for (Entry e : cluster.getProperties().entrySet()) { hiveConf.set(e.getKey().toString(), e.getValue().toString()); } - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); LOG.debug("Hive conf : {}", hiveConf.getAllProperties()); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java index a304e49ae879..25cb75ec41d2 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java @@ -89,12 +89,12 @@ protected void setUpHiveConf() { + File.separator + "mapred" + File.separator + "staging"); hiveConf.set("mapred.temp.dir", workDir + File.separator + this.getClass().getSimpleName() + File.separator + "mapred" + File.separator + "temp"); - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, ""); - hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.POST_EXEC_HOOKS, ""); hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, TEST_WAREHOUSE_DIR); - hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, TEST_WAREHOUSE_DIR); + hiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, true); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java index 424e428be545..a97162de993a 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java @@ -38,7 +38,6 @@ import org.apache.hive.hcatalog.data.schema.HCatFieldSchema; import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils; -import org.junit.BeforeClass; import org.junit.Test; import org.slf4j.Logger; @@ -196,7 +195,7 @@ protected void runHCatDynamicPartitionedTable(boolean asSingleMapTask, public void _testHCatDynamicPartitionMaxPartitions() throws Exception { HiveConf hc = new HiveConf(this.getClass()); - int maxParts = hiveConf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS); + int maxParts = hiveConf.getIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS); LOG.info("Max partitions allowed = {}", maxParts); IOException exc = null; diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java index e601992fc40b..d87158b23fae 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java 
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java @@ -156,7 +156,7 @@ public static void setup() throws Exception { warehousedir = new Path(System.getProperty("test.warehouse.dir")); HiveConf metastoreConf = new HiveConf(); - metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString()); + metastoreConf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, warehousedir.toString()); // Run hive metastore server MetaStoreTestUtils.startMetaStoreWithRetry(metastoreConf); @@ -183,23 +183,23 @@ public static void setup() throws Exception { private static void initializeSetup(HiveConf metastoreConf) throws Exception { hiveConf = new HiveConf(metastoreConf, TestHCatMultiOutputFormat.class); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES, 3); hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE)); - System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); - System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_URIS.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.THRIFT_URIS)); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehousedir.toString()); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, warehousedir.toString()); try { hmsc = new HiveMetaStoreClient(hiveConf); initalizeTables(); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java index 22a0d3f7d134..2b28f4f0d752 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.io.RCFileInputFormat; import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; import org.apache.hadoop.hive.serde.serdeConstants; @@ -120,23 +119,23 @@ public static void setup() throws Exception { 
System.setSecurityManager(new NoExitSecurityManager()); Policy.setPolicy(new DerbyPolicy()); - hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES, 3); hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120, TimeUnit.SECONDS); hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); msc = new HiveMetaStoreClient(hcatConf); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.WAREHOUSE)); - System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); - System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_URIS.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.THRIFT_URIS)); } diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java index c955aa502126..a787f409eb3f 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java @@ -58,10 +58,10 @@ public class TestPassProperties { public void Initialize() throws Exception { hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java index 67193d4e50f1..759a73b49988 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java @@ -38,7 +38,6 @@ import 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; @@ -233,7 +232,7 @@ private void writeThenReadByRecordReader(int intervalRecordCount, jonconf.set("mapred.input.dir", testDir.toString()); JobContext context = new Job(jonconf); HiveConf.setLongVar(context.getConfiguration(), - HiveConf.ConfVars.MAPREDMAXSPLITSIZE, maxSplitSize); + HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, maxSplitSize); List splits = inputFormat.getSplits(context); assertEquals("splits length should be " + splitNumber, splitNumber, splits.size()); int readCount = 0; diff --git a/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java b/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java index afe6e92163b8..dbf7ac3f1a32 100644 --- a/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java +++ b/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java @@ -132,7 +132,7 @@ static public Pair getDBTableNames(String location) throws IOExc static public String getHCatServerUri(Job job) { - return job.getConfiguration().get(HiveConf.ConfVars.METASTOREURIS.varname); + return job.getConfiguration().get(HiveConf.ConfVars.METASTORE_URIS.varname); } static public String getHCatServerPrincipal(Job job) { @@ -153,7 +153,7 @@ private static IMetaStoreClient getHiveMetaClient(String serverUri, HiveConf hiveConf = new HiveConf(job.getConfiguration(), clazz); if (serverUri != null) { - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, serverUri.trim()); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, serverUri.trim()); } if (serverKerberosPrincipal != null) { diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java index ae292eb78c16..d5e5fc311973 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java @@ -84,10 +84,10 @@ public void setUp() throws Exception { } HiveConf hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java index b96479b826a0..b16d5c183d50 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java +++ 
b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java @@ -116,8 +116,8 @@ public static void setUpBeforeClass() throws Exception { + File.separator + "mapred" + File.separator + "staging"); hiveConf.set("mapred.temp.dir", workDir + File.separator + "TestHCatLoaderComplexSchema" + File.separator + "mapred" + File.separator + "temp"); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java index beb4fe9f4b92..0e5691a66543 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java @@ -159,10 +159,10 @@ public void setup() throws Exception { } HiveConf hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java index a0c5ce93ff27..3a2b3c15b5fc 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java @@ -95,11 +95,11 @@ public void setUp() throws Exception { if (driver == null) { HiveConf hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); - hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java index 782fffb516b9..c53e5afa094d 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java @@ -54,7 +54,7 @@ public abstract class MessageFactory { + HCAT_MESSAGE_FORMAT, DEFAULT_MESSAGE_FACTORY_IMPL); - protected static final String HCAT_SERVER_URL = hiveConf.get(HiveConf.ConfVars.METASTOREURIS.name(), ""); + protected static final String HCAT_SERVER_URL = hiveConf.get(HiveConf.ConfVars.METASTORE_URIS.name(), ""); protected static final String HCAT_SERVICE_PRINCIPAL = hiveConf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.name(), ""); /** diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java index 0420c506136d..07c9ca57a2f7 100644 --- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java +++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java @@ -54,9 +54,7 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe; -import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.mapred.TextInputFormat; -import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hive.hcatalog.DerbyPolicy; import org.apache.hive.hcatalog.api.repl.Command; import org.apache.hive.hcatalog.api.repl.ReplicationTask; @@ -109,9 +107,9 @@ public static void tearDown() throws Exception { public static void startMetaStoreServer() throws Exception { hcatConf = new HiveConf(TestHCatClient.class); - String metastoreUri = System.getProperty("test."+HiveConf.ConfVars.METASTOREURIS.varname); + String metastoreUri = System.getProperty("test."+HiveConf.ConfVars.METASTORE_URIS.varname); if (metastoreUri != null) { - hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUri); + hcatConf.setVar(HiveConf.ConfVars.METASTORE_URIS, metastoreUri); useExternalMS = true; return; } @@ -130,17 +128,17 @@ public static void startMetaStoreServer() throws Exception { System.setSecurityManager(new NoExitSecurityManager()); Policy.setPolicy(new DerbyPolicy()); - hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + hcatConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + msPort); - hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); } public static HiveConf getConf(){ @@ -819,7 +817,7 @@ private void 
startReplicationTargetMetaStoreIfRequired() throws Exception { .replace("metastore", "target_metastore")); replicationTargetHCatPort = MetaStoreTestUtils.startMetaStoreWithRetry(conf); replicationTargetHCatConf = new HiveConf(hcatConf); - replicationTargetHCatConf.setVar(HiveConf.ConfVars.METASTOREURIS, + replicationTargetHCatConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + replicationTargetHCatPort); isReplicationTargetHCatRunning = true; } diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java index b94c7d715530..d13adf97cb90 100644 --- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java +++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java @@ -252,7 +252,7 @@ private void init() { private void handleHiveProperties() { HiveConf hiveConf = new HiveConf();//load hive-site.xml from classpath List interestingPropNames = Arrays.asList( - HiveConf.ConfVars.METASTOREURIS.varname, + HiveConf.ConfVars.METASTORE_URIS.varname, HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI.varname, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CachedClientPool.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CachedClientPool.java index c93ce5455e9f..90eea7618df6 100644 --- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CachedClientPool.java +++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CachedClientPool.java @@ -128,7 +128,7 @@ public R run(Action action, boolean retry) static Key extractKey(String cacheKeys, Configuration conf) { // generate key elements in a certain order, so that the Key instances are comparable List elements = Lists.newArrayList(); - elements.add(conf.get(HiveConf.ConfVars.METASTOREURIS.varname, "")); + elements.add(conf.get(HiveConf.ConfVars.METASTORE_URIS.varname, "")); elements.add(conf.get(HiveCatalog.HIVE_CONF_CATALOG, "hive")); if (cacheKeys == null || cacheKeys.isEmpty()) { return Key.of(elements); diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java index 6c98cee6a528..de859a508672 100644 --- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java +++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java @@ -95,11 +95,11 @@ public void initialize(String inputName, Map properties) { } if (properties.containsKey(CatalogProperties.URI)) { - this.conf.set(HiveConf.ConfVars.METASTOREURIS.varname, properties.get(CatalogProperties.URI)); + this.conf.set(HiveConf.ConfVars.METASTORE_URIS.varname, properties.get(CatalogProperties.URI)); } if (properties.containsKey(CatalogProperties.WAREHOUSE_LOCATION)) { - this.conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + this.conf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, LocationUtil.stripTrailingSlash(properties.get(CatalogProperties.WAREHOUSE_LOCATION))); } @@ -489,7 +489,7 @@ protected String defaultWarehouseLocation(TableIdentifier tableIdentifier) { } private String databaseLocation(String databaseName) { - String warehouseLocation = conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname); + String warehouseLocation = 
conf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname); Preconditions.checkNotNull( warehouseLocation, "Warehouse location is not set: hive.metastore.warehouse.dir=null"); warehouseLocation = LocationUtil.stripTrailingSlash(warehouseLocation); @@ -563,7 +563,7 @@ Database convertToDatabase(Namespace namespace, Map meta) { public String toString() { return MoreObjects.toStringHelper(this) .add("name", name) - .add("uri", this.conf == null ? "" : this.conf.get(HiveConf.ConfVars.METASTOREURIS.varname)) + .add("uri", this.conf == null ? "" : this.conf.get(HiveConf.ConfVars.METASTORE_URIS.varname)) .toString(); } diff --git a/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java b/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java index d65d38085a1d..aa55094f12b8 100644 --- a/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java +++ b/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java @@ -1106,7 +1106,7 @@ public void testConstructorWarehousePathWithEndSlash() { catalogWithSlash.initialize( "hive_catalog", ImmutableMap.of(CatalogProperties.WAREHOUSE_LOCATION, wareHousePath + "/")); - assertThat(catalogWithSlash.getConf().get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname)) + assertThat(catalogWithSlash.getConf().get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname)) .as("Should have trailing slash stripped") .isEqualTo(wareHousePath); } diff --git a/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveClientPool.java b/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveClientPool.java index 3a1b92a12c7f..280dc02554ba 100644 --- a/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveClientPool.java +++ b/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveClientPool.java @@ -70,13 +70,13 @@ public void after() { @Test public void testConf() { HiveConf conf = createHiveConf(); - conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, "file:/mywarehouse/"); + conf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "file:/mywarehouse/"); HiveClientPool clientPool = new HiveClientPool(10, conf); HiveConf clientConf = clientPool.hiveConf(); - assertThat(clientConf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname)) - .isEqualTo(conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname)); + assertThat(clientConf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname)) + .isEqualTo(conf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname)); assertThat(clientPool.poolSize()).isEqualTo(10); // 'hive.metastore.sasl.enabled' should be 'true' as defined in xml diff --git a/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveMetastore.java b/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveMetastore.java index 6fc54de9ec2a..d4ceeca67d2b 100644 --- a/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveMetastore.java +++ b/iceberg/iceberg-catalog/src/test/java/org/apache/iceberg/hive/TestHiveMetastore.java @@ -151,7 +151,7 @@ public void start(HiveConf conf, int poolSize) { this.executorService.submit(() -> server.serve()); // in Hive3, setting this as a system prop ensures that it will be picked up whenever a new HiveConf is created - System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname, hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS)); + System.setProperty(HiveConf.ConfVars.METASTORE_URIS.varname, hiveConf.getVar(HiveConf.ConfVars.METASTORE_URIS)); this.clientPool = new 
HiveClientPool(1, hiveConf); } catch (Exception e) { @@ -229,7 +229,7 @@ public R run(ClientPool.Action action) thro private TServer newThriftServer(TServerSocket socket, int poolSize, HiveConf conf) throws Exception { HiveConf serverConf = new HiveConf(conf); - serverConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:" + DERBY_PATH + ";create=true"); + serverConf.set(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, "jdbc:derby:" + DERBY_PATH + ";create=true"); baseHandler = HMS_HANDLER_CTOR.newInstance("new db based metaserver", serverConf); IHMSHandler handler = GET_BASE_HMS_HANDLER.invoke(serverConf, baseHandler, false); @@ -244,8 +244,8 @@ private TServer newThriftServer(TServerSocket socket, int poolSize, HiveConf con } private void initConf(HiveConf conf, int port) { - conf.set(HiveConf.ConfVars.METASTOREURIS.varname, "thrift://localhost:" + port); - conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, "file:" + HIVE_WAREHOUSE_DIR.getAbsolutePath()); + conf.set(HiveConf.ConfVars.METASTORE_URIS.varname, "thrift://localhost:" + port); + conf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "file:" + HIVE_WAREHOUSE_DIR.getAbsolutePath()); conf.set(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname, "file:" + HIVE_EXTERNAL_WAREHOUSE_DIR.getAbsolutePath()); conf.set(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL.varname, "false"); diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputCommitter.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputCommitter.java index c4a6005818ae..ba64faa6188a 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputCommitter.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputCommitter.java @@ -687,7 +687,7 @@ private static FilesForCommit collectResults(int numTasks, ExecutorService execu */ @VisibleForTesting static String generateJobLocation(String location, Configuration conf, JobID jobId) { - String queryId = conf.get(HiveConf.ConfVars.HIVEQUERYID.varname); + String queryId = conf.get(HiveConf.ConfVars.HIVE_QUERY_ID.varname); return location + "/temp/" + queryId + "-" + jobId; } diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java index 7c625543b780..c356898c65eb 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java @@ -72,7 +72,7 @@ private static HiveIcebergWriter writer(JobConf jc) { setWriterLevelConfiguration(jc, table); return WriterBuilder.builderFor(table) - .queryId(jc.get(HiveConf.ConfVars.HIVEQUERYID.varname)) + .queryId(jc.get(HiveConf.ConfVars.HIVE_QUERY_ID.varname)) .tableName(tableName) .attemptID(taskAttemptID) .poolSize(poolSize) diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java index 130b7186221d..548d33f7d93d 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java @@ -146,8 +146,8 @@ public void initialize(@Nullable Configuration configuration, Properties serDePr // Currently 
ClusteredWriter is used which requires that records are ordered by partition keys. // Here we ensure that SortedDynPartitionOptimizer will kick in and do the sorting. // TODO: remove once we have both Fanout and ClusteredWriter available: HIVE-25948 - HiveConf.setIntVar(configuration, HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD, 1); - HiveConf.setVar(configuration, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + HiveConf.setIntVar(configuration, HiveConf.ConfVars.HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD, 1); + HiveConf.setVar(configuration, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); try { this.inspector = IcebergObjectInspector.create(projectedSchema); } catch (Exception e) { diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java index 985e7d48f617..d07b820e6d04 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java @@ -671,8 +671,8 @@ public DynamicPartitionCtx createDPContext( Table table = IcebergTableUtil.getTable(conf, tableDesc.getProperties()); DynamicPartitionCtx dpCtx = new DynamicPartitionCtx(Maps.newLinkedHashMap(), - hiveConf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), - hiveConf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); + hiveConf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME), + hiveConf.getIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS_PER_NODE)); List, ExprNodeDesc>> customSortExprs = Lists.newLinkedList(); dpCtx.setCustomSortExpressions(customSortExprs); diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java index a453e5ea723a..510f562922ba 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java @@ -234,7 +234,7 @@ public static RemoteIterator getFilesIterator(Path path, Conf } static String generateTableObjectLocation(String tableLocation, Configuration conf) { - return tableLocation + "/temp/" + conf.get(HiveConf.ConfVars.HIVEQUERYID.varname) + TABLE_EXTENSION; + return tableLocation + "/temp/" + conf.get(HiveConf.ConfVars.HIVE_QUERY_ID.varname) + TABLE_EXTENSION; } static void createFileForTableObject(Table table, Configuration conf) { @@ -305,7 +305,7 @@ static JobConf getPartJobConf(Configuration confs, org.apache.hadoop.hive.ql.met job.set(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, Constants.ICEBERG_PARTITION_TABLE_SCHEMA); job.set(InputFormatConfig.TABLE_LOCATION, tbl.getPath().toString()); job.set(InputFormatConfig.TABLE_IDENTIFIER, tbl.getFullyQualifiedName() + ".partitions"); - HiveConf.setVar(job, HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE, Constants.DELIMITED_JSON_SERDE); + HiveConf.setVar(job, HiveConf.ConfVars.HIVE_FETCH_OUTPUT_SERDE, Constants.DELIMITED_JSON_SERDE); HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false); return job; } diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerWithEngineBase.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerWithEngineBase.java index 0853124b6025..b5c9e2942b45 100644 
--- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerWithEngineBase.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerWithEngineBase.java @@ -186,9 +186,9 @@ public void before() throws IOException { // Fetch task conversion might kick in for certain queries preventing vectorization code path to be used, so // we turn it off explicitly to achieve better coverage. if (isVectorized) { - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); } else { - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "more"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "more"); } } diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergCRUD.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergCRUD.java index acfe94126dc6..bc3c948c4ad4 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergCRUD.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergCRUD.java @@ -618,7 +618,7 @@ public void testConcurrent2Deletes() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES_WITHOUT_WRITE_CONFLICT); shell.executeStatement(sql); @@ -649,7 +649,7 @@ public void testConcurrent2Updates() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES_WITHOUT_WRITE_CONFLICT); shell.executeStatement(sql); @@ -684,7 +684,7 @@ public void testConcurrentUpdateAndDelete() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES_WITHOUT_WRITE_CONFLICT); shell.executeStatement(sql[i]); @@ -719,7 +719,7 @@ public void testConcurrent2MergeInserts() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES_WITHOUT_WRITE_CONFLICT); 
shell.executeStatement(sql); diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergOutputCommitter.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergOutputCommitter.java index d0eb3ebc8f09..45f82b5a28aa 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergOutputCommitter.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergOutputCommitter.java @@ -224,7 +224,7 @@ private JobConf jobConf(Table table, int taskNum) { JobConf conf = new JobConf(); conf.setNumMapTasks(taskNum); conf.setNumReduceTasks(0); - conf.set(HiveConf.ConfVars.HIVEQUERYID.varname, QUERY_ID); + conf.set(HiveConf.ConfVars.HIVE_QUERY_ID.varname, QUERY_ID); conf.set(InputFormatConfig.OUTPUT_TABLES, table.name()); conf.set(InputFormatConfig.OPERATION_TYPE_PREFIX + table.name(), Context.Operation.OTHER.name()); conf.set(InputFormatConfig.TABLE_CATALOG_PREFIX + table.name(), diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java index 48da53e2b685..c1bbeb039893 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java @@ -1978,7 +1978,7 @@ public void checkIcebergTableLocation() throws TException, InterruptedException, String dBName = "testdb"; String tableName = "tbl"; String dbWithSuffix = "/" + dBName + ".db"; - String dbManagedLocation = shell.getHiveConf().get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname) + dbWithSuffix; + String dbManagedLocation = shell.getHiveConf().get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname) + dbWithSuffix; String dbExternalLocation = shell.getHiveConf().get(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname) + dbWithSuffix; Path noExistedTblPath = new Path(dbManagedLocation + "/" + tableName); diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveShell.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveShell.java index 79e477bbe59f..8d2a9a294c7c 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveShell.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveShell.java @@ -85,9 +85,9 @@ public void setHiveSessionValue(String key, boolean value) { public void start() { // Create a copy of the HiveConf for the metastore metastore.start(new HiveConf(hs2Conf), 20); - hs2Conf.setVar(HiveConf.ConfVars.METASTOREURIS, metastore.hiveConf().getVar(HiveConf.ConfVars.METASTOREURIS)); - hs2Conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, - metastore.hiveConf().getVar(HiveConf.ConfVars.METASTOREWAREHOUSE)); + hs2Conf.setVar(HiveConf.ConfVars.METASTORE_URIS, metastore.hiveConf().getVar(HiveConf.ConfVars.METASTORE_URIS)); + hs2Conf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, + metastore.hiveConf().getVar(HiveConf.ConfVars.METASTORE_WAREHOUSE)); hs2Conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL, metastore.hiveConf().getVar(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL)); @@ -195,13 +195,13 @@ private HiveConf initializeConf() { // Switch off optimizers in order to contain the map reduction within this JVM hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED, true); 
hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_INFER_BUCKET_SORT, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESKEWJOIN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SKEW_JOIN, false); // Speed up test execution - hiveConf.setLongVar(HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL, 1L); + hiveConf.setLongVar(HiveConf.ConfVars.HIVE_COUNTERS_PULL_INTERVAL, 1L); hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); // Resource configuration diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestOptimisticRetry.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestOptimisticRetry.java index cd4aa88e7807..c9d0bf7e3bc1 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestOptimisticRetry.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestOptimisticRetry.java @@ -55,7 +55,7 @@ public void testConcurrentOverlappingUpdates() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES); shell.executeStatement(sql); @@ -89,7 +89,7 @@ public void testNonOverlappingConcurrent2Updates() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES); shell.executeStatement(sql[i]); @@ -126,7 +126,7 @@ public void testConcurrent2MergeInserts() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES); shell.executeStatement(sql); diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java index 155735c6072e..8c28c63b62b4 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java @@ -299,12 +299,12 @@ private void setUpMetastore() throws Exception { //The default org.apache.hadoop.hive.ql.hooks.PreExecutePrinter hook //is present only in the 
ql/test directory - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + hiveConf.set(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, "jdbc:derby:" + new File(workDir + "/metastore_db") + ";create=true"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.toString(), + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.toString(), new File(workDir, "warehouse").toString()); //set where derby logs File derbyLogFile = new File(workDir + "/derby.log"); diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java index fe33f47793fb..acb37344972c 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java @@ -75,9 +75,9 @@ public void Initialize() throws Exception { Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(), getTestDir()); hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString()); + hcatConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); + hcatConf.set(ConfVars.METASTORE_WAREHOUSE.varname, whPath.toString()); hcatConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java index c093055ecff1..eb093e105143 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java @@ -73,10 +73,10 @@ public void setup() throws Exception { warehouseDir = HCatUtil.makePathASafeFileName(dataDir + File.separator + "warehouse"); inputFileName = HCatUtil.makePathASafeFileName(dataDir + File.separator + "input.data"); hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehouseDir); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, warehouseDir); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/AbstractHTLoadBench.java 
b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/AbstractHTLoadBench.java index b33c7da545e2..b597bed53849 100644 --- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/AbstractHTLoadBench.java +++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/AbstractHTLoadBench.java @@ -123,8 +123,8 @@ protected void setupMapJoinHT(HiveConf hiveConf, long seed, int rowCount, TypeInfo[] smallTableValueTypeInfos, int[] smallTableRetainKeyColumnNums, SmallTableGenerationParameters smallTableGenerationParameters) throws Exception { - hiveConf.set(HiveConf.ConfVars.HIVEMAPJOINPARALELHASHTABLETHREADS.varname, LOAD_THREADS_NUM + ""); - LOG.info("Number of threads: " + hiveConf.get(HiveConf.ConfVars.HIVEMAPJOINPARALELHASHTABLETHREADS.varname)); + hiveConf.set(HiveConf.ConfVars.HIVE_MAPJOIN_PARALEL_HASHTABLE_THREADS.varname, LOAD_THREADS_NUM + ""); + LOG.info("Number of threads: " + hiveConf.get(HiveConf.ConfVars.HIVE_MAPJOIN_PARALEL_HASHTABLE_THREADS.varname)); this.rowCount = rowCount; diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/LegacyVectorMapJoinFastHashTableLoader.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/LegacyVectorMapJoinFastHashTableLoader.java index 7af9380b619d..f292cf09481e 100644 --- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/LegacyVectorMapJoinFastHashTableLoader.java +++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/LegacyVectorMapJoinFastHashTableLoader.java @@ -56,7 +56,7 @@ public LegacyVectorMapJoinFastHashTableLoader(TezContext context, Configuration this.hconf = hconf; this.desc = (MapJoinDesc)joinOp.getConf(); this.cacheKey = joinOp.getCacheKey(); - this.htLoadCounter = this.tezContext.getTezProcessorContext().getCounters().findCounter(HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP), hconf.get("__hive.context.name", "")); + this.htLoadCounter = this.tezContext.getTezProcessorContext().getCounters().findCounter(HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP), hconf.get("__hive.context.name", "")); } @Override @@ -66,7 +66,7 @@ public void init(ExecMapperContext context, MapredContext mrContext, this.hconf = hconf; this.desc = joinOp.getConf(); this.cacheKey = joinOp.getCacheKey(); - String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP); + String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); String vertexName = hconf.get(Operator.CONTEXT_NAME_KEY, ""); String counterName = Utilities.getVertexCounterName(HashTableLoaderCounters.HASHTABLE_LOAD_TIME_MS.name(), vertexName); this.htLoadCounter = tezContext.getTezProcessorContext().getCounters().findCounter(counterGroup, counterName); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java index fccf3e0209f8..ce5518f0e4a6 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java @@ -59,7 +59,7 @@ public static void beforeTestBase(String transportMode) throws Exception { hiveConf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); 
hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); - hiveConf.setBoolVar(ConfVars.HIVEFETCHTASKCACHING, false); + hiveConf.setBoolVar(ConfVars.HIVE_FETCH_TASK_CACHING, false); miniHS2 = MiniHiveKdc.getMiniHS2WithKerb(miniHiveKdc, hiveConf); miniHS2.start(new HashMap()); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java index 890e4092ea4f..e370810e5e82 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java @@ -50,12 +50,12 @@ public class TestHs2HooksWithMiniKdc { @BeforeClass public static void setUpBeforeClass() throws Exception { Class.forName(MiniHS2.getJdbcDriverName()); - confOverlay.put(ConfVars.POSTEXECHOOKS.varname, PostExecHook.class.getName()); - confOverlay.put(ConfVars.PREEXECHOOKS.varname, PreExecHook.class.getName()); + confOverlay.put(ConfVars.POST_EXEC_HOOKS.varname, PostExecHook.class.getName()); + confOverlay.put(ConfVars.PRE_EXEC_HOOKS.varname, PreExecHook.class.getName()); confOverlay.put(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, SemanticAnalysisHook.class.getName()); confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "" + Boolean.FALSE); - confOverlay.put(ConfVars.HIVEFETCHTASKCACHING.varname, "" + false); + confOverlay.put(ConfVars.HIVE_FETCH_TASK_CACHING.varname, "" + false); miniHiveKdc = new MiniHiveKdc(); HiveConf hiveConf = new HiveConf(); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java index 58bcac694215..3da7b16a0eaf 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java @@ -48,9 +48,9 @@ public static void beforeTest() throws Exception { String hs2Principal = miniHS2.getConfProperty(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL.varname); String hs2KeyTab = miniHS2.getConfProperty(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB.varname); System.out.println("HS2 principal : " + hs2Principal + " HS2 keytab : " + hs2KeyTab + " Metastore principal : " + metastorePrincipal); - System.setProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE)); - System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); System.setProperty(ConfVars.METASTORE_USE_THRIFT_SASL.varname, String.valueOf(MetastoreConf.getBoolVar(hiveConf, MetastoreConf.ConfVars.USE_THRIFT_SASL))); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStoreNoDoAs.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStoreNoDoAs.java index 74d8e777597e..f666077de886 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStoreNoDoAs.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStoreNoDoAs.java @@ -48,11 +48,11 @@ public static void beforeTest() throws Exception { String hs2Principal = 
miniHS2.getConfProperty(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL.varname); String hs2KeyTab = miniHS2.getConfProperty(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB.varname); System.out.println("HS2 principal : " + hs2Principal + " HS2 keytab : " + hs2KeyTab + " Metastore principal : " + metastorePrincipal); - System.setProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE)); - System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); - System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_URIS.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.THRIFT_URIS)); System.setProperty(ConfVars.METASTORE_USE_THRIFT_SASL.varname, String.valueOf(MetastoreConf.getBoolVar(hiveConf, MetastoreConf.ConfVars.USE_THRIFT_SASL))); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java index 883d333dd48d..77dabb42bd27 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java @@ -81,7 +81,7 @@ public void setUp() throws Exception { hiveConf.setTimeVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE, 1, TimeUnit.SECONDS); hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - hiveConf.setBoolVar(ConfVars.HIVEFETCHTASKCACHING, false); + hiveConf.setBoolVar(ConfVars.HIVE_FETCH_TASK_CACHING, false); miniHS2 = MiniHiveKdc.getMiniHS2WithKerb(miniHiveKdc, hiveConf); miniHS2.start(new HashMap()); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java index 7ca74efb648b..4ee239a7cbff 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java @@ -58,7 +58,7 @@ public static void beforeTest() throws Exception { SSLTestUtils.setMetastoreSslConf(hiveConf); hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - hiveConf.setBoolVar(ConfVars.HIVEFETCHTASKCACHING, false); + hiveConf.setBoolVar(ConfVars.HIVE_FETCH_TASK_CACHING, false); setHMSSaslConf(miniHiveKdc, hiveConf); diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java index 61d2e920be7b..5f952a356951 100644 --- a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java +++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java @@ -26,7 +26,6 @@ import java.lang.reflect.Method; import java.net.URI; -import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; @@ -91,14 +90,14 @@ protected HiveConf createHiveConf() throws Exception { warehouseDir = new Path(new Path(fs.getUri()), 
"/warehouse"); fs.mkdirs(warehouseDir); - conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehouseDir.toString()); + conf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, warehouseDir.toString()); extWarehouseDir = new Path(new Path(fs.getUri()), "/external"); fs.mkdirs(extWarehouseDir); conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL, extWarehouseDir.toString()); // Set up scratch directory Path scratchDir = new Path(new Path(fs.getUri()), "/scratchdir"); - conf.setVar(HiveConf.ConfVars.SCRATCHDIR, scratchDir.toString()); + conf.setVar(HiveConf.ConfVars.SCRATCH_DIR, scratchDir.toString()); return conf; } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/UtilsForTest.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/UtilsForTest.java index b3dfa961a6c3..c25aa1df5262 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/UtilsForTest.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/UtilsForTest.java @@ -39,7 +39,7 @@ public class UtilsForTest { public static void setNewDerbyDbLocation(HiveConf conf, String newloc) { String newDbLoc = System.getProperty("test.tmp.dir") + newloc + "metastore_db"; - conf.setVar(ConfVars.METASTORECONNECTURLKEY, "jdbc:derby:;databaseName=" + newDbLoc + conf.setVar(ConfVars.METASTORE_CONNECT_URL_KEY, "jdbc:derby:;databaseName=" + newDbLoc + ";create=true"); } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java index 7d441b6acc03..add09aec5da2 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java @@ -19,7 +19,6 @@ //The tests here are heavily based on some timing, so there is some chance to fail. 
package org.apache.hadoop.hive.hooks; -import java.io.Serializable; import java.lang.Override; import java.sql.Statement; import java.util.List; @@ -140,9 +139,9 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, @BeforeClass public static void setUpBeforeClass() throws Exception { HiveConf hiveConf = new HiveConf(); - hiveConf.setVar(ConfVars.PREEXECHOOKS, + hiveConf.setVar(ConfVars.PRE_EXEC_HOOKS, PreExecHook.class.getName()); - hiveConf.setVar(ConfVars.POSTEXECHOOKS, + hiveConf.setVar(ConfVars.POST_EXEC_HOOKS, PostExecHook.class.getName()); hiveConf.setVar(ConfVars.SEMANTIC_ANALYZER_HOOK, SemanticAnalysisHook.class.getName()); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java index c10060f8171d..a0d5bd2c99b3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java @@ -59,9 +59,9 @@ protected static void setup() throws Exception { if (isRemoteMetastoreMode) { MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf); } - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); msc = new HiveMetaStoreClient(hiveConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java index 9ddad9922652..06ba4dae0c1a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java @@ -44,7 +44,7 @@ public class TestMetaStoreAuthorization { public void setup() throws Exception { conf.setBoolVar(HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS, true); - conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + conf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); conf.setTimeVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, 60, TimeUnit.SECONDS); } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java index 2f7a2601627c..a94d1da8cc1c 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java @@ -42,7 +42,7 @@ public class TestMetaStoreMetrics { @BeforeClass public static void before() throws Exception { hiveConf = new HiveConf(TestMetaStoreMetrics.class); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_METRICS, true); 
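The hook-related hunks swap PREEXECHOOKS/POSTEXECHOOKS for PRE_EXEC_HOOKS/POST_EXEC_HOOKS but keep the semantics: the value is a comma-separated list of hook class names. A minimal sketch of wiring hooks this way, assuming the renamed constants from this patch; the hook classes are whatever the test supplies:

import org.apache.hadoop.hive.conf.HiveConf;

public class ExecHookConfSketch {
  /** Registers pre/post execution hooks by class name on a fresh HiveConf. */
  public static HiveConf withHooks(Class<?> preHook, Class<?> postHook) {
    HiveConf conf = new HiveConf();
    conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, preHook.getName());
    conf.setVar(HiveConf.ConfVars.POST_EXEC_HOOKS, postHook.getName());
    // Renamed from METASTORETHRIFTCONNECTIONRETRIES; a low retry count keeps failing tests fast.
    conf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3);
    return conf;
  }
}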
hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); hiveConf diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java index 4dcfb2266a0c..a901d87c75eb 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java @@ -84,18 +84,18 @@ public static void setUp() throws Exception { DFSTestUtil.createKey("test_key_cm", miniDFSCluster, conf); DFSTestUtil.createKey("test_key_db", miniDFSCluster, conf); hiveConf = new HiveConf(TestReplChangeManager.class); - hiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + hiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); hiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); cmroot = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmroot"; cmrootFallBack = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootFallback"; cmrootEncrypted = "cmrootEncrypted"; - hiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmroot); - hiveConf.set(HiveConf.ConfVars.REPLCMENCRYPTEDDIR.varname, cmrootEncrypted); - hiveConf.set(HiveConf.ConfVars.REPLCMFALLBACKNONENCRYPTEDDIR.varname, cmrootFallBack); + hiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmroot); + hiveConf.set(HiveConf.ConfVars.REPL_CM_ENCRYPTED_DIR.varname, cmrootEncrypted); + hiveConf.set(HiveConf.ConfVars.REPL_CM_FALLBACK_NONENCRYPTED_DIR.varname, cmrootFallBack); initReplChangeManager(); try { @@ -1253,14 +1253,14 @@ public void recycleFailureWithDifferentEncryptionZonesForCm() throws Throwable { @Test public void testClearerEncrypted() throws Exception { HiveConf hiveConfCmClearer = new HiveConf(TestReplChangeManager.class); - hiveConfCmClearer.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + hiveConfCmClearer.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); hiveConfCmClearer.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - hiveConfCmClearer.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + hiveConfCmClearer.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); String cmrootCmClearer = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootClearer"; - hiveConfCmClearer.set(HiveConf.ConfVars.REPLCMDIR.varname, cmrootCmClearer); + hiveConfCmClearer.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmrootCmClearer); Warehouse warehouseCmClearer = new Warehouse(hiveConfCmClearer); FileSystem cmfs = new Path(cmrootCmClearer).getFileSystem(hiveConfCmClearer); cmfs.mkdirs(warehouseCmClearer.getWhRoot()); @@ -1359,21 +1359,21 @@ public void testClearerEncrypted() throws Exception { @Test public void testCmRootAclPermissions() throws Exception { HiveConf hiveConfAclPermissions = new HiveConf(TestReplChangeManager.class); - 
hiveConfAclPermissions.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + hiveConfAclPermissions.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); hiveConfAclPermissions.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - hiveConfAclPermissions.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + hiveConfAclPermissions.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); String cmRootAclPermissions = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmRootAclPermissions"; - hiveConfAclPermissions.set(HiveConf.ConfVars.REPLCMDIR.varname, cmRootAclPermissions); + hiveConfAclPermissions.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmRootAclPermissions); Warehouse warehouseCmPermissions = new Warehouse(hiveConfAclPermissions); FileSystem cmfs = new Path(cmRootAclPermissions).getFileSystem(hiveConfAclPermissions); cmfs.mkdirs(warehouseCmPermissions.getWhRoot()); FileSystem fsWarehouse = warehouseCmPermissions.getWhRoot().getFileSystem(hiveConfAclPermissions); //change the group of warehouse for testing - Path warehouse = new Path(hiveConfAclPermissions.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname)); + Path warehouse = new Path(hiveConfAclPermissions.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname)); fsWarehouse.setOwner(warehouse, null, "testgroup"); long now = System.currentTimeMillis(); @@ -1434,7 +1434,7 @@ public void testCmRootAclPermissions() throws Exception { return null; }); - String cmEncrypted = hiveConf.get(HiveConf.ConfVars.REPLCMENCRYPTEDDIR.varname, cmrootEncrypted); + String cmEncrypted = hiveConf.get(HiveConf.ConfVars.REPL_CM_ENCRYPTED_DIR.varname, cmrootEncrypted); AclStatus aclStatus = fsWarehouse.getAclStatus(new Path(dirTbl1 + Path.SEPARATOR + cmEncrypted)); AclStatus aclStatus2 = fsWarehouse.getAclStatus(new Path(dirTbl2 + Path.SEPARATOR + cmEncrypted)); AclStatus aclStatus3 = fsWarehouse.getAclStatus(new Path(dirTbl3 + Path.SEPARATOR + cmEncrypted)); @@ -1501,17 +1501,17 @@ public void testCmRootAclPermissions() throws Exception { @Test public void testCmrootEncrypted() throws Exception { HiveConf encryptedHiveConf = new HiveConf(TestReplChangeManager.class); - encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); encryptedHiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - encryptedHiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + encryptedHiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); String cmrootdirEncrypted = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootDirEncrypted"; - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmrootdirEncrypted); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmrootdirEncrypted); FileSystem cmrootdirEncryptedFs = new Path(cmrootdirEncrypted).getFileSystem(hiveConf); cmrootdirEncryptedFs.mkdirs(new Path(cmrootdirEncrypted)); - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMFALLBACKNONENCRYPTEDDIR.varname, cmrootFallBack); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_FALLBACK_NONENCRYPTED_DIR.varname, cmrootFallBack); //Create cm in encrypted zone 
EncryptionZoneUtils.createEncryptionZone(new Path(cmrootdirEncrypted), "test_key_db", conf); @@ -1562,11 +1562,11 @@ public void testCmrootEncrypted() throws Exception { @Test public void testCmrootFallbackEncrypted() throws Exception { HiveConf encryptedHiveConf = new HiveConf(TestReplChangeManager.class); - encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); encryptedHiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - encryptedHiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + encryptedHiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); String cmrootdirEncrypted = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootIsEncrypted"; String cmRootFallbackEncrypted = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootFallbackEncrypted"; @@ -1574,8 +1574,8 @@ public void testCmrootFallbackEncrypted() throws Exception { try { cmrootdirEncryptedFs.mkdirs(new Path(cmrootdirEncrypted)); cmrootdirEncryptedFs.mkdirs(new Path(cmRootFallbackEncrypted)); - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmrootdirEncrypted); - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMFALLBACKNONENCRYPTEDDIR.varname, cmRootFallbackEncrypted); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmrootdirEncrypted); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_FALLBACK_NONENCRYPTED_DIR.varname, cmRootFallbackEncrypted); //Create cm in encrypted zone EncryptionZoneUtils.createEncryptionZone(new Path(cmrootdirEncrypted), "test_key_db", conf); @@ -1600,19 +1600,19 @@ public void testCmrootFallbackEncrypted() throws Exception { @Test public void testCmrootFallbackRelative() throws Exception { HiveConf encryptedHiveConf = new HiveConf(TestReplChangeManager.class); - encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); encryptedHiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - encryptedHiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + encryptedHiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); String cmrootdirEncrypted = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootIsEncrypted"; String cmRootFallbackEncrypted = "cmrootFallbackEncrypted"; FileSystem cmrootdirEncryptedFs = new Path(cmrootdirEncrypted).getFileSystem(encryptedHiveConf); try { cmrootdirEncryptedFs.mkdirs(new Path(cmrootdirEncrypted)); cmrootdirEncryptedFs.mkdirs(new Path(cmRootFallbackEncrypted)); - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmrootdirEncrypted); - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMFALLBACKNONENCRYPTEDDIR.varname, cmRootFallbackEncrypted); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmrootdirEncrypted); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_FALLBACK_NONENCRYPTED_DIR.varname, cmRootFallbackEncrypted); //Create cm in encrypted zone EncryptionZoneUtils.createEncryptionZone(new Path(cmrootdirEncrypted), "test_key_db", conf); diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java index 981f5fb4c211..69db7180cfff 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java @@ -70,7 +70,7 @@ public void setUp() throws Exception { DummyPreListener.class.getName()); testMetastoreDB = System.getProperty("java.io.tmpdir") + File.separator + "test_metastore-" + System.currentTimeMillis(); - System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, "jdbc:derby:" + testMetastoreDB + ";create=true"); metastoreSchemaInfo = MetaStoreSchemaInfoFactory.get(hiveConf, System.getProperty("test.tmp.dir", "target/tmp"), "derby"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java index ebac38d10944..78304634c4ed 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java @@ -97,11 +97,11 @@ private static void internalSetUpProvidePerm() throws Exception { configuration.set("dfs.client.use.datanode.hostname", "true"); permDdfs = new MiniDFSCluster.Builder(configuration).numDataNodes(2).format(true).build(); permhiveConf = new HiveConf(TestReplChangeManager.class); - permhiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, - "hdfs://" + permDdfs.getNameNode().getHostAndPort() + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); - permhiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + permhiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, + "hdfs://" + permDdfs.getNameNode().getHostAndPort() + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); + permhiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); permCmroot = "hdfs://" + permDdfs.getNameNode().getHostAndPort() + "/cmroot"; - permhiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, permCmroot); + permhiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, permCmroot); permhiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); permWarehouse = new Warehouse(permhiveConf); } @@ -109,11 +109,11 @@ private static void internalSetUpProvidePerm() throws Exception { private static void internalSetUp() throws Exception { m_dfs = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(2).format(true).build(); hiveConf = new HiveConf(TestReplChangeManager.class); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, - "hdfs://" + m_dfs.getNameNode().getHostAndPort() + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); - hiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, + "hdfs://" + m_dfs.getNameNode().getHostAndPort() + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); + hiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); cmroot = "hdfs://" + m_dfs.getNameNode().getHostAndPort() + "/cmroot"; - hiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmroot); + hiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmroot); hiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 
60); warehouse = new Warehouse(hiveConf); fs = new Path(cmroot).getFileSystem(hiveConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestServerSpecificConfig.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestServerSpecificConfig.java index 17542f177c1e..e2fbf0450270 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestServerSpecificConfig.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestServerSpecificConfig.java @@ -181,7 +181,7 @@ private void setHiveSiteWithRemoteMetastore() throws IOException { FileOutputStream out = new FileOutputStream(hiveSite); HiveConf.setHiveSiteLocation(oldDefaultHiveSite); HiveConf defaultHiveConf = new HiveConf(); - defaultHiveConf.setVar(ConfVars.METASTOREURIS, "dummyvalue"); + defaultHiveConf.setVar(ConfVars.METASTORE_URIS, "dummyvalue"); // reset to the hive-site.xml values for following param defaultHiveConf.set("hive.dummyparam.test.server.specific.config.override", "from.hive-site.xml"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaTool.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaTool.java index af4f4bb36196..2f3e83d9e153 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaTool.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaTool.java @@ -61,8 +61,7 @@ import org.junit.Assert; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertEquals; -import com.google.gson.JsonParser; -import org.json.JSONObject; + import org.junit.Before; import org.junit.After; import org.junit.Test; @@ -110,10 +109,10 @@ public void setUp() throws Exception { + File.separator + "mapred" + File.separator + "staging"); hiveConf.set("mapred.temp.dir", workDir + File.separator + this.getClass().getSimpleName() + File.separator + "mapred" + File.separator + "temp"); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getWarehouseDir()); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, getWarehouseDir()); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java index 36ba35f2aea0..245fc156512c 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java @@ -113,12 +113,12 @@ public String toString() { @Before public void setUp() throws Exception { hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - 
hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false); - hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON, true); @@ -344,7 +344,7 @@ public void testNonStandardConversion01() throws Exception { * data files in directly. * * Actually Insert Into ... select ... union all ... with - * HIVE_OPTIMIZE_UNION_REMOVE (and HIVEFETCHTASKCONVERSION="none"?) will create subdirs + * HIVE_OPTIMIZE_UNION_REMOVE (and HIVE_FETCH_TASK_CONVERSION="none"?) will create subdirs * but if writing to non acid table there is a merge task on MR (but not on Tez) */ @Ignore("HIVE-17214")//this consistently works locally but never in ptest.... @@ -757,7 +757,7 @@ public void testGetSplitsLocks() throws Exception { HiveConf modConf = new HiveConf(hiveConf); setupTez(modConf); modConf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "tez"); - modConf.setVar(ConfVars.HIVEFETCHTASKCONVERSION, "more"); + modConf.setVar(ConfVars.HIVE_FETCH_TASK_CONVERSION, "more"); modConf.setVar(HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS, "localhost"); // SessionState/Driver needs to be restarted with the Tez conf settings. @@ -818,7 +818,7 @@ public void testGetSplitsLocksWithMaterializedView() throws Exception { HiveConf modConf = new HiveConf(hiveConf); setupTez(modConf); modConf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "tez"); - modConf.setVar(ConfVars.HIVEFETCHTASKCONVERSION, "more"); + modConf.setVar(ConfVars.HIVE_FETCH_TASK_CONVERSION, "more"); modConf.setVar(HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS, "localhost"); // SessionState/Driver needs to be restarted with the Tez conf settings. 
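The TestAcidOnTez and TestConstraintsMerge hunks repeat one setup recipe with the renamed constants: nonstrict mapred mode, HiveInputFormat as the input format, and HIVE_FETCH_TASK_CONVERSION toggled per test ("more" or "none"). A minimal sketch of that recipe, assuming the renamed constants from this patch:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.io.HiveInputFormat;

public class AcidOnTezConfSketch {
  /** Applies the shared ACID-on-Tez test defaults to an existing HiveConf. */
  public static void applyDefaults(HiveConf conf) {
    conf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict");                      // was HIVEMAPREDMODE
    conf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); // was HIVEINPUTFORMAT
    // was HIVEFETCHTASKCONVERSION; "none" disables fetch-task conversion, the tests above also use "more".
    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none");
  }
}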
@@ -868,7 +868,7 @@ public void testCrudMajorCompactionSplitGrouper() throws Exception { // make a clone of existing hive conf HiveConf confForTez = new HiveConf(hiveConf); setupTez(confForTez); // one-time setup to make query able to run with Tez - HiveConf.setVar(confForTez, HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(confForTez, HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); runStatementOnDriver("create transactional table " + tblName + " (a int, b int) clustered by (a) into 2 buckets " + "stored as ORC TBLPROPERTIES('bucketing_version'='2', 'transactional'='true'," + " 'transactional_properties'='default')", confForTez); @@ -1020,9 +1020,9 @@ public static void setupTez(HiveConf conf) { } private void setupMapJoin(HiveConf conf) { - conf.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN, true); - conf.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASK, true); - conf.setLongVar(HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD, 100000); + conf.setBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN, true); + conf.setBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONALTASK, true); + conf.setLongVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD, 100000); } private List runStatementOnDriver(String stmt) throws Exception { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestConstraintsMerge.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestConstraintsMerge.java index 12b626d09d3d..1e1a8d74ae1a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestConstraintsMerge.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestConstraintsMerge.java @@ -28,9 +28,7 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.tez.mapreduce.hadoop.MRJobConfig; import org.junit.After; -import org.junit.Assert; import org.junit.Before; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; @@ -73,12 +71,12 @@ public String toString() { @Before public void setUp() throws Exception { hiveConf = new HiveConf(this.getClass()); - hiveConf.set(ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.set(ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(ConfVars.POST_EXEC_HOOKS.varname, ""); + hiveConf.set(ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, false); - hiveConf.setVar(ConfVars.HIVEMAPREDMODE, "nonstrict"); - hiveConf.setVar(ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.setVar(ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + hiveConf.setVar(ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf .setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java index 7e54dde6f926..b22a3c0f3fde 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java @@ -83,10 +83,10 @@ public void setUp() throws Exception { // Test with remote 
metastore service int port = MetaStoreTestUtils.startMetaStoreWithRetry(); - conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + conf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); + conf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); conf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, new URI(tmppath + "/warehouse").getPath()); + conf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, new URI(tmppath + "/warehouse").getPath()); // Initialize second mocked filesystem (implement only necessary stuff) // Physical files are resides in local file system in the similar location diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDatabaseTableDefault.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDatabaseTableDefault.java index 041be2d063f7..f5cbd1636962 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDatabaseTableDefault.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDatabaseTableDefault.java @@ -23,7 +23,6 @@ import java.io.File; import java.util.ArrayList; import java.util.List; -import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.FileUtils; @@ -36,9 +35,7 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.metastore.api.Table; import org.junit.After; -import org.junit.Assert; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -93,7 +90,7 @@ public void setUp() throws Exception { HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.CREATE_TABLES_AS_ACID, true); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_CREATE_TABLES_AS_INSERT_ONLY, true); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true); - hiveConf.set(HiveConf.ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname, "ORC"); + hiveConf.set(HiveConf.ConfVars.HIVE_DEFAULT_MANAGED_FILEFORMAT.varname, "ORC"); hiveConf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName()); hiveConf.set(HiveConf.ConfVars.METASTORE_CLIENT_CAPABILITIES.varname, "HIVEFULLACIDREAD,HIVEFULLACIDWRITE,HIVECACHEINVALIDATE,HIVEMANAGESTATS,HIVEMANAGEDINSERTWRITE,HIVEMANAGEDINSERTREAD"); TestTxnDbUtil.setConfValues(hiveConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java index 05a104f53ea5..470b5ccdfd0e 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java @@ -33,7 +33,6 @@ import java.util.Set; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.HMSHandler; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hive.jdbc.miniHS2.MiniHS2; @@ -72,7 +71,7 @@ public static void beforeTest() throws Exception { conf.setIntVar(HiveConf.ConfVars.METASTORE_LIMIT_PARTITION_REQUEST, PARTITION_REQUEST_LIMIT); conf.setBoolVar(HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN, true); conf.setBoolVar(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL, true); - 
conf.setBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING, true); + conf.setBoolVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED, false); miniHS2 = new MiniHS2.Builder().withConf(conf).build(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java index 127de2301b16..6ae8239c667b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java @@ -188,7 +188,7 @@ public void testQueryloglocParentDirNotExist() throws Exception { try { String actualDir = parentTmpDir + "/test"; HiveConf conf = new HiveConf(SessionState.class); - conf.set(HiveConf.ConfVars.HIVEHISTORYFILELOC.toString(), actualDir); + conf.set(HiveConf.ConfVars.HIVE_HISTORY_FILE_LOC.toString(), actualDir); SessionState ss = new CliSessionState(conf); HiveHistory hiveHistory = new HiveHistoryImpl(ss); Path actualPath = new Path(actualDir); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithReadOnlyHook.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithReadOnlyHook.java index 379c53bb2f7d..f81f99d2c8ea 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithReadOnlyHook.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithReadOnlyHook.java @@ -22,7 +22,6 @@ import static org.apache.hadoop.hive.common.repl.ReplConst.READ_ONLY_HOOK; import static org.junit.Assert.assertEquals; -import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -66,7 +65,7 @@ public static void classLevelSetup() throws Exception { acidEnableConf.put(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET.varname, "false"); acidEnableConf.put(HiveConf.ConfVars.REPL_RETAIN_CUSTOM_LOCATIONS_FOR_DB_ON_TARGET.varname, "false"); - acidEnableConf.put(HiveConf.ConfVars.PREEXECHOOKS.varname, READ_ONLY_HOOK); + acidEnableConf.put(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, READ_ONLY_HOOK); acidEnableConf.putAll(overrides); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java index 731eb9c6bd73..92879d5ebba3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; @@ -119,7 +118,7 @@ public void targetAndSourceHaveDifferentEncryptionZoneKeys() throws Throwable { put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false"); put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname, UserGroupInformation.getCurrentUser().getUserName()); - put(HiveConf.ConfVars.REPLDIR.varname, primary.repldDir); + put(HiveConf.ConfVars.REPL_DIR.varname, primary.repldDir); }}, "test_key123"); //read should pass without raw-byte distcp @@ 
-162,7 +161,7 @@ public void targetAndSourceHaveSameEncryptionZoneKeys() throws Throwable { put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false"); put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname, UserGroupInformation.getCurrentUser().getUserName()); - put(HiveConf.ConfVars.REPLDIR.varname, primary.repldDir); + put(HiveConf.ConfVars.REPL_DIR.varname, primary.repldDir); }}, "test_key"); List dumpWithClause = Arrays.asList( diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java index 91f8f76e1ff5..0d3178e8619b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric; @@ -54,7 +53,6 @@ import org.junit.BeforeClass; import org.junit.Test; -import javax.annotation.Nullable; import java.io.File; import java.util.ArrayList; import java.util.Arrays; @@ -68,11 +66,6 @@ import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_ENABLE_BACKGROUND_THREAD; import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_TARGET_DB_PROPERTY; import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_FAILOVER_ENDPOINT; -import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_METRICS_FAILBACK_COUNT; -import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_METRICS_FAILOVER_COUNT; -import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_METRICS_LAST_FAILBACK_ENDTIME; -import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_METRICS_LAST_FAILBACK_STARTTIME; -import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_METRICS_LAST_FAILOVER_TYPE; import static org.apache.hadoop.hive.common.repl.ReplConst.TARGET_OF_REPLICATION; import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION; import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.EVENT_ACK_FILE; @@ -137,7 +130,7 @@ public void tearDown() throws Throwable { @Test public void testBuildTableDiffGeneration() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Create two external & two managed tables and do a bootstrap dump & load. 
WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName) .run("create external table t1 (id int)") @@ -202,7 +195,7 @@ public void testBuildTableDiffGeneration() throws Throwable { Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump tuple = replica.dump(replicatedDbName, withClause); @@ -263,7 +256,7 @@ public void testEmptyDiffForControlFailover() throws Throwable { // In case of control failover both A & B will be in sync, so the table diff should be created empty, without any // error. List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle(A->B) primary.dump(primaryDbName, withClause); @@ -293,7 +286,7 @@ public void testEmptyDiffForControlFailover() throws Throwable { Path newReplDir = new Path(replica.repldDir + "rev"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump tuple = replica.dump(replicatedDbName, withClause); @@ -317,7 +310,7 @@ public void testEmptyDiffForControlFailover() throws Throwable { @Test public void testFirstIncrementalMandatory() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Create one external and one managed tables and do a bootstrap dump. WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName) .run("create external table t1 (id int)") @@ -343,7 +336,7 @@ public void testFirstIncrementalMandatory() throws Throwable { Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a dump on cluster B, it should throw an exception, since the first incremental isn't done yet. try { @@ -355,13 +348,13 @@ public void testFirstIncrementalMandatory() throws Throwable { // Do a incremental cycle and check we don't get this exception. withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); primary.dump(primaryDbName, withClause); replica.load(replicatedDbName, primaryDbName, withClause); // Retrigger reverse dump, this time it should be successful and event ack should get created. 
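Nearly every replication test in this file threads a custom dump root through the WITH clause, keyed on the varname of the renamed REPL_DIR constant. A minimal sketch of that helper pattern, assuming the renamed constant from this patch; the directory argument is a placeholder:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;

public class ReplDumpDirClauseSketch {
  /** Builds a REPL DUMP/LOAD WITH-clause entry pointing hive.repl.rootdir at dumpRoot. */
  public static List<String> withDumpRoot(String dumpRoot) {
    List<String> withClause = new ArrayList<>();
    // The key is taken from the enum's varname, so the rename never touches the query text.
    withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + dumpRoot + "'");
    return withClause;
  }
}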
withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); tuple = replica.dump(replicatedDbName, withClause); @@ -373,7 +366,7 @@ public void testFirstIncrementalMandatory() throws Throwable { @Test public void testFailureCasesInTableDiffGeneration() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle(A->B) primary.dump(primaryDbName, withClause); @@ -418,7 +411,7 @@ public void testFailureCasesInTableDiffGeneration() throws Throwable { Path newReplDir = new Path(replica.repldDir + "reverse"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Trigger dump on target cluster. @@ -505,7 +498,7 @@ public void testFailureCasesInTableDiffGeneration() throws Throwable { @Test public void testReverseReplicationFailureWhenSourceDbIsDropped() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle. primary.dump(primaryDbName, withClause); @@ -542,7 +535,7 @@ public void testReverseReplicationFailureWhenSourceDbIsDropped() throws Throwabl Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump, this should create event_ack file tuple = replica.dump(replicatedDbName, withClause); @@ -800,7 +793,7 @@ public void testReverseBootstrapWithFailedIncremental() throws Throwable { @Test public void testOverwriteDuringBootstrap() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle. 
primary.dump(primaryDbName, withClause); @@ -862,7 +855,7 @@ public void testOverwriteDuringBootstrap() throws Throwable { Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump tuple = replica.dump(replicatedDbName, withClause); @@ -928,7 +921,7 @@ public void testOverwriteDuringBootstrap() throws Throwable { @Test public void testTblMetricRegisterDuringSecondCycleOfOptimizedBootstrap() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(false); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName) .run("create table t1_managed (id int) clustered by(id) into 3 buckets stored as orc " + "tblproperties (\"transactional\"=\"true\")") @@ -968,7 +961,7 @@ public void testTblMetricRegisterDuringSecondCycleOfOptimizedBootstrap() throws Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(false); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump @@ -1004,7 +997,7 @@ public void testTblMetricRegisterDuringSecondCycleOfOptimizedBootstrap() throws @Test public void testTblMetricRegisterDuringSecondLoadCycleOfOptimizedBootstrap() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(false); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName) .run("create table t1_managed (id int) clustered by(id) into 3 buckets stored as orc " + "tblproperties (\"transactional\"=\"true\")") @@ -1044,7 +1037,7 @@ public void testTblMetricRegisterDuringSecondLoadCycleOfOptimizedBootstrap() thr Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(false); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump @@ -1085,7 +1078,7 @@ public void testTblMetricRegisterDuringSecondLoadCycleOfOptimizedBootstrap() thr @NotNull private List setUpFirstIterForOptimisedBootstrap() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle. 
primary.dump(primaryDbName, withClause); @@ -1210,7 +1203,7 @@ private List setUpFirstIterForOptimisedBootstrap() throws Throwable { Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump tuple = replica.dump(replicatedDbName, withClause); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 9345d34bc096..60788ad75ca4 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hive.common.repl.ReplScope; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.repl.ReplAck; -import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.StringAppender; import org.apache.hadoop.hive.ql.parse.repl.metric.MetricCollector; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; @@ -203,16 +202,16 @@ static void internalBeforeClassSetup(Map additionalProperties) MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false); hconf.set(MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS.getHiveName(), DBNOTIF_LISTENER_CLASSNAME); // turn on db notification listener on metastore - hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true); + hconf.setBoolVar(HiveConf.ConfVars.REPL_CM_ENABLED, true); hconf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true); - hconf.setVar(HiveConf.ConfVars.REPLCMDIR, TEST_PATH + "/cmroot/"); + hconf.setVar(HiveConf.ConfVars.REPL_CM_DIR, TEST_PATH + "/cmroot/"); proxySettingName = "hadoop.proxyuser." 
+ Utils.getUGI().getShortUserName() + ".hosts"; hconf.set(proxySettingName, "*"); MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false); - hconf.setVar(HiveConf.ConfVars.REPLDIR,TEST_PATH + "/hrepl/"); + hconf.setVar(HiveConf.ConfVars.REPL_DIR,TEST_PATH + "/hrepl/"); hconf.set(MetastoreConf.ConfVars.THRIFT_CONNECTION_RETRIES.getHiveName(), "3"); - hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hconf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hconf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hconf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true"); hconf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true); hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); @@ -221,13 +220,13 @@ static void internalBeforeClassSetup(Map additionalProperties) hconf.set(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname, "org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore"); hconf.set(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname, "/tmp/warehouse/external"); - hconf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true); + hconf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, true); hconf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, true); hconf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE, true); hconf.setBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET, false); hconf.setBoolVar(HiveConf.ConfVars.REPL_BATCH_INCREMENTAL_EVENTS, false); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); additionalProperties.forEach((key, value) -> { hconf.set(key, value); @@ -247,7 +246,7 @@ static void internalBeforeClassSetup(Map additionalProperties) FileUtils.deleteDirectory(new File("metastore_db2")); HiveConf hconfMirrorServer = new HiveConf(); - hconfMirrorServer.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:;databaseName=metastore_db2;create=true"); + hconfMirrorServer.set(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, "jdbc:derby:;databaseName=metastore_db2;create=true"); MetaStoreTestUtils.startMetaStoreWithRetry(hconfMirrorServer, true); hconfMirror = new HiveConf(hconf); MetastoreConf.setBoolVar(hconfMirror, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false); @@ -4050,7 +4049,7 @@ public void testDeleteStagingDir() throws IOException { @Override public boolean accept(Path path) { - return path.getName().startsWith(HiveConf.getVar(hconf, HiveConf.ConfVars.STAGINGDIR)); + return path.getName().startsWith(HiveConf.getVar(hconf, HiveConf.ConfVars.STAGING_DIR)); } }; FileStatus[] statuses = fs.listStatus(path, filter); @@ -4230,7 +4229,7 @@ public void testRecycleFileDropTempTable() throws IOException { run("INSERT INTO " + dbName + ".normal values (1)", driver); run("DROP TABLE " + dbName + ".normal", driver); - String cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR); + String cmDir = hconf.getVar(HiveConf.ConfVars.REPL_CM_DIR); Path path = new Path(cmDir); FileSystem fs = path.getFileSystem(hconf); ContentSummary cs = fs.getContentSummary(path); @@ -4285,7 +4284,7 @@ public void testLoadCmPathMissing() throws Exception { run("DROP TABLE " + dbName + ".normal", driver); - String cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR); + String cmDir = 
hconf.getVar(HiveConf.ConfVars.REPL_CM_DIR); Path path = new Path(cmDir); FileSystem fs = path.getFileSystem(hconf); ContentSummary cs = fs.getContentSummary(path); @@ -4370,7 +4369,7 @@ public void testDDLTasksInParallel() throws Throwable{ StringAppender appender = null; LoggerConfig loggerConfig = null; try { - driverMirror.getConf().set(HiveConf.ConfVars.EXECPARALLEL.varname, "true"); + driverMirror.getConf().set(HiveConf.ConfVars.EXEC_PARALLEL.varname, "true"); logger = LogManager.getLogger("hive.ql.metadata.Hive"); oldLevel = logger.getLevel(); ctx = (LoggerContext) LogManager.getContext(false); @@ -4403,7 +4402,7 @@ public void testDDLTasksInParallel() throws Throwable{ assertEquals(count, 2); appender.reset(); } finally { - driverMirror.getConf().set(HiveConf.ConfVars.EXECPARALLEL.varname, "false"); + driverMirror.getConf().set(HiveConf.ConfVars.EXEC_PARALLEL.varname, "false"); loggerConfig.setLevel(oldLevel); ctx.updateLoggers(); appender.removeFromLogger(logger.getName()); @@ -4414,7 +4413,7 @@ public void testDDLTasksInParallel() throws Throwable{ public void testRecycleFileNonReplDatabase() throws IOException { String dbName = createDBNonRepl(testName.getMethodName(), driver); - String cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR); + String cmDir = hconf.getVar(HiveConf.ConfVars.REPL_CM_DIR); Path path = new Path(cmDir); FileSystem fs = path.getFileSystem(hconf); ContentSummary cs = fs.getContentSummary(path); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java index 5546f41447e2..6d9fea15fd55 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.ErrorMsg; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.junit.Assert; @@ -140,7 +139,7 @@ public Boolean apply(@Nullable CallerArguments args) { } finally { InjectableBehaviourObjectStore.resetAlterTableModifier(); } - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf); if(nonRecoverablePath != null){ baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java index fb2ad07acb7a..893ccd48786a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.repl.PathBuilder; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; -import 
org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.util.DependencyResolver; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Assert; @@ -77,7 +76,6 @@ import static org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION; import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT; import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.NON_RECOVERABLE_MARKER; -import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -515,7 +513,7 @@ public void testParallelExecutionOfReplicationBootStrapLoad() throws Throwable { .run("create table t3 (rank int)") .dump(primaryDbName); - replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, true); + replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXEC_PARALLEL, true); replica.load(replicatedDbName, primaryDbName) .run("use " + replicatedDbName) .run("repl status " + replicatedDbName) @@ -525,7 +523,7 @@ public void testParallelExecutionOfReplicationBootStrapLoad() throws Throwable { .run("select country from t2") .verifyResults(Arrays.asList("india", "australia", "russia", "uk", "us", "france", "japan", "china")); - replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, false); + replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXEC_PARALLEL, false); } @Test @@ -710,8 +708,8 @@ public void testBootStrapDumpOfWarehouse() throws Throwable { public void testReplLoadFromSourceUsingWithClause() throws Throwable { HiveConf replicaConf = replica.getConf(); List withConfigs = Arrays.asList( - "'hive.metastore.warehouse.dir'='" + replicaConf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE) + "'", - "'hive.metastore.uris'='" + replicaConf.getVar(HiveConf.ConfVars.METASTOREURIS) + "'", + "'hive.metastore.warehouse.dir'='" + replicaConf.getVar(HiveConf.ConfVars.METASTORE_WAREHOUSE) + "'", + "'hive.metastore.uris'='" + replicaConf.getVar(HiveConf.ConfVars.METASTORE_URIS) + "'", "'hive.repl.replica.functions.root.dir'='" + replicaConf.getVar(HiveConf.ConfVars.REPL_FUNCTIONS_ROOT_DIR) + "'"); //////////// Bootstrap //////////// @@ -1654,7 +1652,7 @@ public Boolean apply(@Nullable CallerArguments args) { // is loaded before t2. So that scope is set to table in first iteration for table t1. In the next iteration, it // loads only remaining partitions of t2, so that the table tracker has no tasks. 
- Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); if(nonRecoverablePath != null){ baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -1994,7 +1992,7 @@ public void testRangerReplicationRetryExhausted() throws Throwable { ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode()); } //Delete non recoverable marker to fix this - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2041,7 +2039,7 @@ public void testFailureUnsupportedAuthorizerReplication() throws Throwable { ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode()); } //Delete non recoverable marker to fix this - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2160,7 +2158,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_ATLAS_ENDPOINT.varname, true); ensureFailedAdminRepl(getAtlasClause(confMap), true); //Delete non recoverable marker to fix this - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2170,7 +2168,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_ATLAS_REPLICATED_TO_DB.varname, true); ensureFailedAdminRepl(getAtlasClause(confMap), true); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2178,7 +2176,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_SOURCE_CLUSTER_NAME.varname, true); ensureFailedAdminRepl(getAtlasClause(confMap), true); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2186,7 +2184,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), 
HiveConf.ConfVars.REPL_TARGET_CLUSTER_NAME.varname, true); ensureFailedAdminRepl(getAtlasClause(confMap), true); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2200,7 +2198,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_ATLAS_ENDPOINT.varname, false); ensureFailedAdminRepl(getAtlasClause(confMap), false); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2210,7 +2208,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_SOURCE_CLUSTER_NAME.varname, false); ensureFailedAdminRepl(getAtlasClause(confMap), false); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2218,7 +2216,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_TARGET_CLUSTER_NAME.varname, false); ensureFailedAdminRepl(getAtlasClause(confMap), false); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java index 8710e2c70a0d..9645f8d03fe3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java @@ -96,7 +96,7 @@ public void tearDown() throws Throwable { @Test public void testTargetEventIdGenerationAfterFirstIncrementalInOptFailover() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle(A->B) primary.dump(primaryDbName, withClause); @@ -156,7 +156,7 @@ public void testTargetEventIdGenerationAfterFirstIncrementalInOptFailover() thro Path newReplDir = new Path(replica.repldDir + 
"reverse1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); tuple = replica.dump(replicatedDbName); @@ -177,7 +177,7 @@ public void testTargetEventIdGenerationAfterFirstIncrementalInOptFailover() thro public void testTargetEventIdGenerationInOptmisedFailover() throws Throwable { // Do a a cycle of bootstrap dump & load. List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle(A->B) primary.dump(primaryDbName, withClause); @@ -265,7 +265,7 @@ public void testTargetEventIdGenerationInOptmisedFailover() throws Throwable { Path newReplDir = new Path(replica.repldDir + "reverse01"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); tuple = replica.dump(replicatedDbName, withClause); @@ -285,7 +285,7 @@ public void testTargetEventIdGenerationInOptmisedFailover() throws Throwable { public void testTargetEventIdWithNotificationsExpiredInOptimisedFailover() throws Throwable { // Do a a cycle of bootstrap dump & load. List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle(A->B) primary.dump(primaryDbName, withClause); @@ -347,7 +347,7 @@ public NotificationEventResponse apply(@Nullable NotificationEventResponse event Path newReplDir = new Path(replica.repldDir + "reverse01"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); try { replica.dump(replicatedDbName, withClause); @@ -1091,7 +1091,7 @@ private void verifyTableDataExists(WarehouseInstance warehouse, Path dbDataPath, private List getStagingLocationConfig(String stagingLoc, boolean addDistCpConfigs) throws IOException { List confList = new ArrayList<>(); - confList.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + stagingLoc + "'"); + confList.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + stagingLoc + "'"); if (addDistCpConfigs) { confList.add("'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname + "'='1'"); confList.add("'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXNUMFILES.varname + "'='0'"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java index 9eefd04e7f9a..8badc4c2895b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java @@ -1026,7 +1026,7 @@ public Boolean 
apply(@Nullable CallerArguments args) { InjectableBehaviourObjectStore.resetAlterTableModifier(); } - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf); if(nonRecoverablePath != null){ baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -1302,7 +1302,7 @@ public void testExternalTableBaseDirMandatory() throws Throwable { ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode()); } //delete non recoverable marker - Path dumpPath = new Path(primary.hiveConf.get(HiveConf.ConfVars.REPLDIR.varname), + Path dumpPath = new Path(primary.hiveConf.get(HiveConf.ConfVars.REPL_DIR.varname), Base64.getEncoder().encodeToString(primaryDbName.toLowerCase() .getBytes(StandardCharsets.UTF_8.name()))); FileSystem fs = dumpPath.getFileSystem(conf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosUsingSnapshots.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosUsingSnapshots.java index cdc87733581f..f941d183b82d 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosUsingSnapshots.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosUsingSnapshots.java @@ -488,7 +488,7 @@ public void testFailureScenarios() throws Throwable { // Ignore } // Check if there is a non-recoverable error or not. - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf); assertTrue(fs.exists(nonRecoverablePath)); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java index ff7733d2b9b6..2c0a70398d42 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java @@ -137,7 +137,7 @@ public void testAcidTablesReplLoadBootstrapIncr() throws Throwable { primary.run("create scheduled query s1_t1 every 5 seconds as repl dump " + primaryDbName); replica.run("create scheduled query s2_t1 every 5 seconds as repl load " + primaryDbName + " INTO " + replicatedDbName); - Path dumpRoot = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR), + Path dumpRoot = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR), Base64.getEncoder().encodeToString(primaryDbName.toLowerCase().getBytes(StandardCharsets.UTF_8.name()))); FileSystem fs = FileSystem.get(dumpRoot.toUri(), primary.hiveConf); @@ -208,7 +208,7 @@ public void testExternalTablesReplLoadBootstrapIncr() throws Throwable { primary.run("create scheduled query s1_t2 every 5 seconds as repl dump " + primaryDbName + withClause); replica.run("create scheduled query s2_t2 every 5 seconds as repl load " + primaryDbName + " INTO " + replicatedDbName + withClause); - Path dumpRoot = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR), + Path dumpRoot = new 
Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR), Base64.getEncoder().encodeToString(primaryDbName.toLowerCase().getBytes(StandardCharsets.UTF_8.name()))); FileSystem fs = FileSystem.get(dumpRoot.toUri(), primary.hiveConf); next = Integer.parseInt(ReplDumpWork.getTestInjectDumpDir()) + 1; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java index e23c542d670b..d842385f48ed 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.CallerArguments; import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreClientWithLocalCache; import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.junit.After; @@ -46,7 +45,6 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.junit.Ignore; import org.junit.Assert; import java.io.IOException; @@ -337,7 +335,7 @@ private String dumpLoadVerify(List tableNames, String lastReplicationId, // Load, if necessary changing configuration. if (parallelLoad) { - replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, true); + replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXEC_PARALLEL, true); } // Fail load if for testing failure and retry scenario. Fail the load while setting @@ -350,7 +348,7 @@ private String dumpLoadVerify(List tableNames, String lastReplicationId, } } - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf); if(nonRecoverablePath != null){ baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -366,7 +364,7 @@ private String dumpLoadVerify(List tableNames, String lastReplicationId, } if (parallelLoad) { - replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, false); + replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXEC_PARALLEL, false); } // Test statistics diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java index e7701b54ca6e..ac656b45cff7 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java @@ -129,35 +129,35 @@ private void initialize(String cmRoot, String externalTableWarehouseRoot, String Map overridesForHiveConf) throws Exception { hiveConf = new HiveConf(miniDFSCluster.getConfiguration(0), TestReplicationScenarios.class); - String metaStoreUri = System.getProperty("test." + HiveConf.ConfVars.METASTOREURIS.varname); + String metaStoreUri = System.getProperty("test." 
+ HiveConf.ConfVars.METASTORE_URIS.varname); if (metaStoreUri != null) { - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, metaStoreUri); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, metaStoreUri); return; } // hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, hiveInTest); // turn on db notification listener on meta store - hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehouseRoot); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, warehouseRoot); hiveConf.setVar(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL, externalTableWarehouseRoot); hiveConf.setVar(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS, LISTENER_CLASS); - hiveConf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true); + hiveConf.setBoolVar(HiveConf.ConfVars.REPL_CM_ENABLED, true); hiveConf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true); - hiveConf.setVar(HiveConf.ConfVars.REPLCMDIR, cmRoot); + hiveConf.setVar(HiveConf.ConfVars.REPL_CM_DIR, cmRoot); hiveConf.setVar(HiveConf.ConfVars.REPL_FUNCTIONS_ROOT_DIR, functionsRoot); hiveConf.setBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE, false); - hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, + hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY, "jdbc:derby:memory:${test.tmp.dir}/APP;create=true"); - hiveConf.setVar(HiveConf.ConfVars.REPLDIR, this.repldDir); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.setVar(HiveConf.ConfVars.REPL_DIR, this.repldDir); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); if (!hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER).equals("org.apache.hadoop.hive.ql.lockmgr.DbTxnManager")) { hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); } hiveConf.set(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname, "org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore"); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); for (Map.Entry entry : overridesForHiveConf.entrySet()) { hiveConf.set(entry.getKey(), entry.getValue()); @@ -181,14 +181,14 @@ private void initialize(String cmRoot, String externalTableWarehouseRoot, String */ - /*hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, "jdbc:mysql://localhost:3306/APP"); + /*hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY, "jdbc:mysql://localhost:3306/APP"); hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); - hiveConf.setVar(HiveConf.ConfVars.METASTOREPWD, "hivepassword"); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_PWD, "hivepassword"); hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "hiveuser");*/ - /*hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,"jdbc:postgresql://localhost/app"); + /*hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY,"jdbc:postgresql://localhost/app"); hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "org.postgresql.Driver"); - hiveConf.setVar(HiveConf.ConfVars.METASTOREPWD, "password"); + 
hiveConf.setVar(HiveConf.ConfVars.METASTORE_PWD, "password"); hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "postgres");*/ driver = DriverFactory.newDriver(hiveConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java index b3383d923bec..b2e2678a8165 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; @@ -77,11 +76,11 @@ public void setUp() throws Exception { // Turn off client-side authorization clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); ugi = Utils.getUGI(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java index b78c348c5203..0235aef1d54b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.security.DummyHiveMetastoreAuthorizationProvider.AuthCallContext; @@ -68,12 +67,12 @@ public void setUp() throws Exception { clientHiveConf = new HiveConf(this.getClass()); - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); 
SessionState.start(new CliSessionState(clientHiveConf)); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java index 948ab4d10370..120d967a4754 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java @@ -80,13 +80,13 @@ public void setUp() throws Exception { clientHiveConf.set(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER.varname, InjectableDummyAuthenticator.class.getName()); clientHiveConf.set(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS.varname, ""); - clientHiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + clientHiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); ugi = Utils.getUGI(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java index 3fa1d0d5b50d..72a953fea425 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java @@ -113,12 +113,12 @@ public void setUp() throws Exception { // Turn off client-side authorization clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false); - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); ugi = Utils.getUGI(); @@ -175,7 +175,7 @@ public void testSimplePrivileges() throws Exception { String tblName = getTestTableName(); String userName = setupUser(); String loc = clientHiveConf.get(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname) + "/" + dbName; - String mLoc = clientHiveConf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname) + "/" + dbName; + String mLoc = clientHiveConf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname) + "/" + dbName; allowCreateDatabase(userName); 
driver.run("create database " + dbName + " location '" + loc + "' managedlocation '" + mLoc + "'"); Database db = msc.getDatabase(dbName); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreClientSideAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreClientSideAuthorizationProvider.java index dbd71cb0ceb4..b166df33b8bd 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreClientSideAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreClientSideAuthorizationProvider.java @@ -63,13 +63,13 @@ public void setUp() throws Exception { clientHiveConf.set(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER.varname, InjectableDummyAuthenticator.class.getName()); clientHiveConf.set(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS.varname, ""); - clientHiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + clientHiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); ugi = Utils.getUGI(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java index d1e80698efa9..1a5a840c8680 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.security.DummyHiveMetastoreAuthorizationProvider.AuthCallContext; @@ -63,7 +62,7 @@ public static void setUp() throws Exception { clientHiveConf = new HiveConf(); - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); + clientHiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(new CliSessionState(clientHiveConf)); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java index b7148fa98e1e..0d939af9de46 100644 --- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java @@ -107,7 +107,7 @@ public static void beforeTest() throws Exception { conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, true); conf.setVar(ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName()); conf.setBoolVar(ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED, true); - conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + conf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); conf.setBoolVar(ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE, true); conf.setBoolVar(ConfVars.HIVE_ZOOKEEPER_KILLQUERY_ENABLE, false); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java index 8645812ab95f..8e09d9697f4a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.util.Shell; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -56,12 +55,12 @@ static public void oneTimeSetup() throws Exception { conf.set(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK.toString(), "true"); conf.set(HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL.toString(), "true"); LoggerFactory.getLogger("SessionState"); - conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, + conf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, new Path(System.getProperty("test.tmp.dir"), "warehouse").toString()); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, m_dfs.getFileSystem().getUri().toString()); - scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); m_dfs.getFileSystem().mkdirs(scratchDir); m_dfs.getFileSystem().setPermission(scratchDir, new FsPermission("777")); } @@ -163,7 +162,7 @@ public void testLocalDanglingFilesCleaning() throws Exception { // Simulating hdfs dangling dir and its inuse.lck file // Note: Give scratch dirs all the write permissions FsPermission allPermissions = new FsPermission((short)00777); - customScratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + customScratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); Utilities.createDirsWithPermission(conf, customScratchDir, allPermissions, true); Path hdfsRootDir = new Path(customScratchDir + l + userName + l + hdfs); Path hdfsSessionDir = new Path(hdfsRootDir + l + userName + l + appId); @@ -171,7 +170,7 @@ public void testLocalDanglingFilesCleaning() throws Exception { fs.create(hdfsSessionLock); // Simulating local dangling files - customLocalTmpDir = new Path (HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + customLocalTmpDir = new Path (HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); Path localSessionDir = new Path(customLocalTmpDir + l + appId); Path localPipeOutFileRemove = new Path(customLocalTmpDir + l + appId + "-started-with-session-name.pipeout"); diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorOnTezTest.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorOnTezTest.java index 5a3324831e0d..03c6f70e0fe1 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorOnTezTest.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorOnTezTest.java @@ -45,7 +45,6 @@ import org.junit.ClassRule; import org.junit.rules.TemporaryFolder; -import java.io.EOFException; import java.io.File; import java.io.IOException; import java.util.Collections; @@ -102,11 +101,11 @@ protected void setupWithConf(HiveConf hiveConf) throws Exception { if (!(new File(TEST_WAREHOUSE_DIR).mkdirs())) { throw new RuntimeException("Could not create " + TEST_WAREHOUSE_DIR); } - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, ""); - hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, ""); - hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, TEST_WAREHOUSE_DIR); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.POST_EXEC_HOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, TEST_WAREHOUSE_DIR); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); MetastoreConf.setTimeVar(hiveConf, MetastoreConf.ConfVars.TXN_OPENTXN_TIMEOUT, 2, TimeUnit.SECONDS); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEANER_ON, true); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java index 4a7bb34bad1a..67af2443b259 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java @@ -52,9 +52,9 @@ public class TestCleanerWithReplication extends CompactorTest { public void setup() throws Exception { HiveConf conf = new HiveConf(); conf.set("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString()); - conf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true); + conf.setBoolVar(HiveConf.ConfVars.REPL_CM_ENABLED, true); setup(conf); - cmRootDirectory = new Path(conf.get(HiveConf.ConfVars.REPLCMDIR.varname)); + cmRootDirectory = new Path(conf.get(HiveConf.ConfVars.REPL_CM_DIR.varname)); if (!fs.exists(cmRootDirectory)) { fs.mkdirs(cmRootDirectory); } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactorBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactorBase.java index a57a817e1612..3027028a0798 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactorBase.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactorBase.java @@ -81,12 +81,12 @@ public void setup() throws Exception { } HiveConf hiveConf = new HiveConf(this.getClass()); - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, ""); - hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, ""); - 
hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, TEST_WAREHOUSE_DIR); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.POST_EXEC_HOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, TEST_WAREHOUSE_DIR); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, false); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEANER_ON, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEAN_ABORTS_USING_CLEANER, true); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java index 1342b25916b4..a1885423a9d9 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java @@ -885,10 +885,10 @@ public void testMajorCompactionNotPartitionedWithoutBuckets() throws Exception { Assert.assertEquals("pre-compaction bucket 0", expectedRsBucket0, testDataProvider.getBucketData(tblName, "536870912")); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); // Run major compaction and cleaner CompactorTestUtil.runCompaction(conf, dbName, tblName, CompactionType.MAJOR, true); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); verifySuccessfulCompaction(1); @@ -2215,8 +2215,8 @@ public void testCompactionWithSchemaEvolutionAndBuckets() throws Exception { @Test public void testCompactionWithSchemaEvolutionNoBucketsMultipleReducers() throws Exception { HiveConf hiveConf = new HiveConf(conf); - hiveConf.setIntVar(HiveConf.ConfVars.MAXREDUCERS, 2); - hiveConf.setIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS, 2); + hiveConf.setIntVar(HiveConf.ConfVars.MAX_REDUCERS, 2); + hiveConf.setIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS, 2); driver = DriverFactory.newDriver(hiveConf); String dbName = "default"; String tblName = "testCompactionWithSchemaEvolutionNoBucketsMultipleReducers"; @@ -2915,10 +2915,10 @@ public void testCompactionWithCreateTableProps() throws Exception { CompactionRequest rqst = new CompactionRequest(dbName, tblName, CompactionType.MAJOR); CompactionResponse resp = txnHandler.compact(rqst); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); // Run major compaction and cleaner runWorker(conf); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); @@ -2981,10 +2981,10 @@ public void testCompactionWithAlterTableProps() throws Exception { // Get all data before compaction is run List expectedData = 
testDP.getAllData(tblName); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); // Run major compaction and cleaner runWorker(conf); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorOnTez.java index 14197a5326d2..90d3cb1cc55a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorOnTez.java @@ -59,10 +59,10 @@ public void testCompactorGatherStats() throws Exception{ executeStatementOnDriver("analyze table " + dbName + "." + tableName + " compute statistics for columns", driver); executeStatementOnDriver("insert into " + dbName + "." + tableName + " values(2)", driver); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); // Run major compaction and cleaner CompactorTestUtil.runCompaction(conf, dbName, tableName, CompactionType.MAJOR, false); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); verifySuccessfulCompaction(1); @@ -76,10 +76,10 @@ public void testCompactorGatherStats() throws Exception{ executeStatementOnDriver("alter table " + dbName + "." + tableName + " set tblproperties('compactor.mapred.job.queue.name'='" + CUSTOM_COMPACTION_QUEUE + "')", driver); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); // Run major compaction and cleaner CompactorTestUtil.runCompaction(conf, dbName, tableName, CompactionType.MAJOR, false); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); verifySuccessfulCompaction(2); @@ -90,10 +90,10 @@ public void testCompactorGatherStats() throws Exception{ assertEquals("Value should contain new data", 1, colStats.get(0).getStatsData().getLongStats().getLowValue()); executeStatementOnDriver("insert into " + dbName + "." 
+ tableName + " values(4)", driver); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); CompactorTestUtil.runCompaction(conf, dbName, tableName, CompactionType.MAJOR, false, Collections.singletonMap("compactor.mapred.job.queue.name", CUSTOM_COMPACTION_QUEUE)); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); verifySuccessfulCompaction(3); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java index 0a57c4588337..55d36ed57bcc 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java @@ -85,11 +85,11 @@ public TestMmCompactorOnTez() { CompactorTestUtil.getBaseOrDeltaNames(fs, AcidUtils.deltaFileFilter, table, null)); if (isTez(conf)) { - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); } // Run a compaction CompactorTestUtil.runCompaction(conf, dbName, tableName, CompactionType.MINOR, true); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); verifySuccessulTxn(1); @@ -480,8 +480,8 @@ private void testMmMinorCompactionPartitionedWithBuckets(String fileFormat) thro @Test public void testMmMinorCompactionWithSchemaEvolutionNoBucketsMultipleReducers() throws Exception { HiveConf hiveConf = new HiveConf(conf); - hiveConf.setIntVar(HiveConf.ConfVars.MAXREDUCERS, 2); - hiveConf.setIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS, 2); + hiveConf.setIntVar(HiveConf.ConfVars.MAX_REDUCERS, 2); + hiveConf.setIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS, 2); driver = DriverFactory.newDriver(hiveConf); String dbName = "default"; String tblName = "testMmMinorCompactionWithSchemaEvolutionNoBucketsMultipleReducers"; @@ -638,7 +638,7 @@ private void verifyAllContents(String tblName, TestDataProvider dataProvider, * Set to true to cause all transactions to be rolled back, until set back to false. 
*/ private static void rollbackAllTxns(boolean val, IDriver driver) { - driver.getConf().setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, val); + driver.getConf().setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, val); } private boolean isTez(HiveConf conf){ diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java index e8ebf251297d..08626809fb8b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java @@ -92,7 +92,7 @@ public static void preTests() throws Exception { HiveConf hiveConf = UtilsForTest.getHiveOnTezConfFromDir("../../data/conf/tez/"); hiveConf.setVar(HiveConf.ConfVars.HIVE_LOCK_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, false); hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose"); miniHS2 = new MiniHS2(hiveConf, MiniClusterType.TEZ); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestHplSqlViaBeeLine.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestHplSqlViaBeeLine.java index 31c29baccf02..2e3a3c945a80 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestHplSqlViaBeeLine.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestHplSqlViaBeeLine.java @@ -54,7 +54,7 @@ public static void preTests() throws Exception { hiveConf.setVar(HiveConf.ConfVars.HIVE_LOCK_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager"); hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE, 10); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, false); hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose"); miniHS2 = new MiniHS2(hiveConf, MiniHS2.MiniClusterType.TEZ); Map confOverlay = new HashMap<>(); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java index d6f285498a1e..821f504bac22 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java @@ -205,12 +205,12 @@ public static void setUpBeforeClass() throws Exception { // Create test database and base tables once for all the test Class.forName(driverName); System.setProperty(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose"); - System.setProperty(ConfVars.HIVEMAPREDMODE.varname, "nonstrict"); + System.setProperty(ConfVars.HIVE_MAPRED_MODE.varname, "nonstrict"); System.setProperty(ConfVars.HIVE_AUTHORIZATION_MANAGER.varname, "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider"); System.setProperty(ConfVars.HIVE_SERVER2_PARALLEL_OPS_IN_SESSION.varname, "false"); - System.setProperty(ConfVars.REPLCMENABLED.varname, "true"); - System.setProperty(ConfVars.REPLCMDIR.varname, "cmroot"); + System.setProperty(ConfVars.REPL_CM_ENABLED.varname, "true"); + System.setProperty(ConfVars.REPL_CM_DIR.varname, "cmroot"); con = getConnection(defaultDbName + ";create=true"); Statement stmt = con.createStatement(); 
assertNotNull("Statement is null", stmt); @@ -2100,7 +2100,7 @@ public void testSetCommand() throws SQLException { String rline = res.getString(1); assertFalse( "set output must not contain hidden variables such as the metastore password:" + rline, - rline.contains(HiveConf.ConfVars.METASTOREPWD.varname) + rline.contains(HiveConf.ConfVars.METASTORE_PWD.varname) && !(rline.contains(HiveConf.ConfVars.HIVE_CONF_HIDDEN_LIST.varname))); // the only conf allowed to have the metastore pwd keyname is the hidden list configuration // value @@ -2347,7 +2347,7 @@ public void testFetchFirstSetCmds() throws Exception { */ @Test public void testFetchFirstDfsCmds() throws Exception { - String wareHouseDir = conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname); + String wareHouseDir = conf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname); execFetchFirst("dfs -ls " + wareHouseDir, DfsProcessor.DFS_RESULT_HEADER, false); } diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java index 2436f5f9d8f1..fcf666da13df 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java @@ -67,7 +67,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.PersistenceManagerProvider; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; @@ -85,7 +84,6 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; import static org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION; @@ -219,7 +217,7 @@ private static void startMiniHS2(HiveConf conf, boolean httpMode) throws Excepti conf.setBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED, false); conf.setBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER, false); // store post-exec hooks calls so we can look at them later - conf.setVar(ConfVars.POSTEXECHOOKS, ReadableHook.class.getName() + "," + + conf.setVar(ConfVars.POST_EXEC_HOOKS, ReadableHook.class.getName() + "," + LineageLogger.class.getName()); MiniHS2.Builder builder = new MiniHS2.Builder().withConf(conf).cleanupLocalDirOnStartup(false); if (httpMode) { @@ -807,15 +805,15 @@ public void testSessionScratchDirs() throws Exception { // FS FileSystem fs = miniHS2.getLocalFS(); FsPermission expectedFSPermission = new FsPermission(HiveConf.getVar(conf, - HiveConf.ConfVars.SCRATCHDIRPERMISSION)); + HiveConf.ConfVars.SCRATCH_DIR_PERMISSION)); // Verify scratch dir paths and permission // HDFS scratch dir - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR) + "/" + userName); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR) + "/" + userName); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, false); // Local scratch dir - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, true); // Downloaded resources dir @@ -831,11 +829,11 @@ public void testSessionScratchDirs() throws Exception { // Verify scratch dir paths 
and permission // HDFS scratch dir - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR) + "/" + userName); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR) + "/" + userName); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, false); // Local scratch dir - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, true); // Downloaded resources dir @@ -982,7 +980,7 @@ public void testRootScratchDir() throws Exception { FsPermission expectedFSPermission = new FsPermission((short)00733); // Verify scratch dir paths and permission // HDFS scratch dir - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, false); conn.close(); @@ -992,7 +990,7 @@ public void testRootScratchDir() throws Exception { conf.set("hive.exec.scratchdir", tmpDir + "/level1/level2/level3"); startMiniHS2(conf); conn = getConnection(miniHS2.getJdbcURL(testDbName), userName, "password"); - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, false); conn.close(); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/cbo_rp_TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/cbo_rp_TestJdbcDriver2.java index b43a1b7586de..1942dc194f13 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/cbo_rp_TestJdbcDriver2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/cbo_rp_TestJdbcDriver2.java @@ -58,7 +58,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; import org.apache.hadoop.hive.ql.processors.DfsProcessor; import org.apache.hive.common.util.HiveVersionInfo; import org.apache.hive.jdbc.Utils.JdbcConnectionParams; @@ -2059,7 +2058,7 @@ public void testFetchFirstSetCmds() throws Exception { */ @Test public void testFetchFirstDfsCmds() throws Exception { - String wareHouseDir = conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname); + String wareHouseDir = conf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname); execFetchFirst("dfs -ls " + wareHouseDir, DfsProcessor.DFS_RESULT_HEADER, false); } diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java b/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java index f088bc651c9f..6321e049f5cb 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java @@ -29,10 +29,8 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim; import org.apache.hive.jdbc.miniHS2.MiniHS2; -import org.apache.hive.service.cli.HiveSQLException; import org.junit.AfterClass; import 
org.junit.Assert; import org.junit.BeforeClass; @@ -98,7 +96,7 @@ public void testAccessDenied() throws Exception { miniHS2 != null && miniHS2.isStarted()); Class.forName(MiniHS2.getJdbcDriverName()); - Path scratchDir = new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCHDIR)); + Path scratchDir = new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCH_DIR)); MiniDFSShim dfs = miniHS2.getDfs(); FileSystem fs = dfs.getFileSystem(); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java index 819838d09120..850fa243d9da 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java @@ -39,7 +39,7 @@ public static void setUpBeforeClass() throws Exception { service = new EmbeddedThriftBinaryCLIService(); HiveConf conf = new HiveConf(); conf.setBoolean("datanucleus.schema.autoCreateTables", true); - conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + conf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); UtilsForTest.expandHiveConfParams(conf); service.init(conf); client = new ThriftCLIServiceClient(service); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2ClearDanglingScratchDir.java b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2ClearDanglingScratchDir.java index 1adfdebdf2be..ff36e24b2d13 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2ClearDanglingScratchDir.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2ClearDanglingScratchDir.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.util.Shell; import org.junit.Assert; import org.junit.Test; @@ -40,13 +39,13 @@ public void testScratchDirCleared() throws Exception { conf.set(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK.toString(), "true"); conf.set(HiveConf.ConfVars.HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR.toString(), "true"); - Path scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + Path scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); m_dfs.getFileSystem().mkdirs(scratchDir); m_dfs.getFileSystem().setPermission(scratchDir, new FsPermission("777")); // Fake two live session SessionState.start(conf); - conf.setVar(HiveConf.ConfVars.HIVESESSIONID, UUID.randomUUID().toString()); + conf.setVar(HiveConf.ConfVars.HIVE_SESSION_ID, UUID.randomUUID().toString()); SessionState.start(conf); // Fake dead session diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2SessionHive.java b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2SessionHive.java index 15cfb190d5e0..79ddd77556cd 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2SessionHive.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2SessionHive.java @@ -186,7 +186,7 @@ public static void setupBeforeClass() throws Exception { throw e; } - miniHS2.getHiveConf().setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + miniHS2.getHmsPort()); + miniHS2.getHiveConf().setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + 
miniHS2.getHmsPort()); try (Connection conn = DriverManager. getConnection(miniHS2.getJdbcURL(), System.getProperty("user.name"), ""); diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java index 184e56f158c1..e8827bda9007 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java @@ -60,7 +60,7 @@ public boolean isDerby() { } public QTestMetaStoreHandler setMetaStoreConfiguration(HiveConf conf) { - conf.setVar(ConfVars.METASTOREDBTYPE, getDbTypeConfString()); + conf.setVar(ConfVars.METASTORE_DB_TYPE, getDbTypeConfString()); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY, rule.getJdbcUrl()); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER, rule.getJdbcDriver()); diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMiniClusters.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMiniClusters.java index 38530f047e3f..e94a842d7636 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMiniClusters.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMiniClusters.java @@ -420,7 +420,7 @@ private void createRemoteDirs(HiveConf conf) { // Create remote dirs once. if (getMr() != null) { assert fs != null; - Path warehousePath = fs.makeQualified(new Path(conf.getVar(ConfVars.METASTOREWAREHOUSE))); + Path warehousePath = fs.makeQualified(new Path(conf.getVar(ConfVars.METASTORE_WAREHOUSE))); assert warehousePath != null; Path hiveJarPath = fs.makeQualified(new Path(conf.getVar(ConfVars.HIVE_JAR_DIRECTORY))); assert hiveJarPath != null; @@ -570,13 +570,13 @@ private void setFsRelatedProperties(HiveConf conf, boolean isLocalFs, FileSystem conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsUriString); // Remote dirs - conf.setVar(ConfVars.METASTOREWAREHOUSE, warehousePath.toString()); + conf.setVar(ConfVars.METASTORE_WAREHOUSE, warehousePath.toString()); conf.setVar(ConfVars.HIVE_JAR_DIRECTORY, jarPath.toString()); conf.setVar(ConfVars.HIVE_USER_INSTALL_DIR, userInstallPath.toString()); - // ConfVars.SCRATCHDIR - {test.tmp.dir}/scratchdir + // ConfVars.SCRATCH_DIR - {test.tmp.dir}/scratchdir // Local dirs - // ConfVars.LOCALSCRATCHDIR - {test.tmp.dir}/localscratchdir + // ConfVars.LOCAL_SCRATCH_DIR - {test.tmp.dir}/localscratchdir // TODO Make sure to cleanup created dirs. 
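[Editorial aside -- illustration, not part of the patch.] The hunks above only rename the Java enum constants; the key strings behind them are unchanged, which is why the test earlier in this patch can still call conf.set("hive.exec.scratchdir", ...) by its literal key while the surrounding code moves to ConfVars.SCRATCH_DIR. A minimal sketch of that point, assuming a post-patch HiveConf on the classpath (the class name below is invented for illustration):

import org.apache.hadoop.hive.conf.HiveConf;

// Illustrative only: the rename changes constant names, not configuration keys.
public class ConfVarsRenameSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();

    // SCRATCHDIR became SCRATCH_DIR, but the key is still "hive.exec.scratchdir",
    // so a string-keyed write is visible through the renamed constant.
    conf.set("hive.exec.scratchdir", "/tmp/hive-scratch-demo");
    System.out.println(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR));

    // METASTOREWAREHOUSE became METASTORE_WAREHOUSE; its key,
    // "hive.metastore.warehouse.dir", is likewise untouched.
    System.out.println(conf.getVar(HiveConf.ConfVars.METASTORE_WAREHOUSE));
  }
}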
} diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index e9c86372bc98..180c6e70d6b6 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -22,7 +22,6 @@ import java.io.BufferedOutputStream; import java.io.File; -import java.util.LinkedHashSet; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.FileWriter; @@ -570,7 +569,7 @@ public void postInit() throws Exception { sem = new SemanticAnalyzer(new QueryState.Builder().withHiveConf(conf).build()); - testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE); + testWarehouse = conf.getVar(HiveConf.ConfVars.METASTORE_WAREHOUSE); db = Hive.get(conf); pd = new ParseDriver(); @@ -750,7 +749,7 @@ private CommandProcessorResponse executeClientInternal(String commands) throws C * if you want to use another hive cmd after the failure to sanity check the state of the system. */ private boolean ignoreErrors() { - return conf.getBoolVar(HiveConf.ConfVars.CLIIGNOREERRORS); + return conf.getBoolVar(HiveConf.ConfVars.CLI_IGNORE_ERRORS); } boolean isHiveCommand(String command) { @@ -775,7 +774,7 @@ private CommandProcessorResponse executeTestCommand(String command) throws Comma //replace ${hiveconf:hive.metastore.warehouse.dir} with actual dir if existed. //we only want the absolute path, so remove the header, such as hdfs://localhost:57145 String wareHouseDir = - SessionState.get().getConf().getVar(ConfVars.METASTOREWAREHOUSE).replaceAll("^[a-zA-Z]+://.*?:\\d+", ""); + SessionState.get().getConf().getVar(ConfVars.METASTORE_WAREHOUSE).replaceAll("^[a-zA-Z]+://.*?:\\d+", ""); commandArgs = commandArgs.replaceAll("\\$\\{hiveconf:hive\\.metastore\\.warehouse\\.dir\\}", wareHouseDir); if (SessionState.get() != null) { diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/MapJoinCounterHook.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/MapJoinCounterHook.java index 43cb8c9615cf..6e5262213444 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/MapJoinCounterHook.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/MapJoinCounterHook.java @@ -30,7 +30,7 @@ public class MapJoinCounterHook implements ExecuteWithHookContext { public void run(HookContext hookContext) { HiveConf conf = hookContext.getConf(); - boolean enableConvert = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECONVERTJOIN); + boolean enableConvert = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CONVERT_JOIN); if (!enableConvert) { return; } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java index a4ac92b62570..e705c12f8db7 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java @@ -34,7 +34,7 @@ public class DummyStatsAggregator implements StatsAggregator { // denotes the method which needs to throw an error. 
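[Editorial aside -- illustration, not part of the patch.] The MapJoinCounterHook hunk above shows the read pattern this rename touches most often: a hook takes the HiveConf from its HookContext and checks a ConfVars flag. A hypothetical hook written against the new constant names, sketched from that pattern (the class name and log message are invented):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;

// Hypothetical hook mirroring MapJoinCounterHook's config check after the rename.
public class ConvertJoinLoggingHook implements ExecuteWithHookContext {
  @Override
  public void run(HookContext hookContext) {
    HiveConf conf = hookContext.getConf();
    // HIVECONVERTJOIN is now HIVE_CONVERT_JOIN; the getBoolVar call itself is unchanged.
    if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CONVERT_JOIN)) {
      return;
    }
    // HIVEQUERYID is now HIVE_QUERY_ID.
    System.out.println("map-join conversion enabled for query "
        + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID));
  }
}

Such a hook could be wired in through the post-execution hook list (ConfVars.POST_EXEC_HOOKS), the same setting TestJdbcWithMiniHS2 configures earlier in this patch.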
@Override public boolean connect(StatsCollectionContext scc) { - errorMethod = HiveConf.getVar(scc.getHiveConf(), HiveConf.ConfVars.HIVETESTMODEDUMMYSTATAGGR); + errorMethod = HiveConf.getVar(scc.getHiveConf(), HiveConf.ConfVars.HIVE_TEST_MODE_DUMMY_STAT_AGGR); if (errorMethod.equalsIgnoreCase("connect")) { return false; } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java index cc80fc2b3e6a..41e475fd7a17 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java @@ -37,7 +37,7 @@ public class DummyStatsPublisher implements StatsPublisher { // denotes the method which needs to throw an error. @Override public boolean init(StatsCollectionContext context) { - errorMethod = HiveConf.getVar(context.getHiveConf(), HiveConf.ConfVars.HIVETESTMODEDUMMYSTATPUB); + errorMethod = HiveConf.getVar(context.getHiveConf(), HiveConf.ConfVars.HIVE_TEST_MODE_DUMMY_STAT_PUB); if (errorMethod.equalsIgnoreCase("init")) { return false; } @@ -47,7 +47,7 @@ public boolean init(StatsCollectionContext context) { @Override public boolean connect(StatsCollectionContext context) { - errorMethod = HiveConf.getVar(context.getHiveConf(), HiveConf.ConfVars.HIVETESTMODEDUMMYSTATPUB); + errorMethod = HiveConf.getVar(context.getHiveConf(), HiveConf.ConfVars.HIVE_TEST_MODE_DUMMY_STAT_PUB); if (errorMethod.equalsIgnoreCase("connect")) { return false; } diff --git a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java index d21b76418607..069d58c6a0b0 100644 --- a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java +++ b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java @@ -90,13 +90,13 @@ public void clearProperties() { * @return */ public Path getWareHouseDir() { - return new Path(hiveConf.getVar(ConfVars.METASTOREWAREHOUSE)); + return new Path(hiveConf.getVar(ConfVars.METASTORE_WAREHOUSE)); } public void setWareHouseDir(String wareHouseURI) { verifyNotStarted(); - System.setProperty(ConfVars.METASTOREWAREHOUSE.varname, wareHouseURI); - hiveConf.setVar(ConfVars.METASTOREWAREHOUSE, wareHouseURI); + System.setProperty(ConfVars.METASTORE_WAREHOUSE.varname, wareHouseURI); + hiveConf.setVar(ConfVars.METASTORE_WAREHOUSE, wareHouseURI); } /** diff --git a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java index 9e95d3b2db92..8076a0718ed7 100644 --- a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java +++ b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java @@ -348,12 +348,12 @@ private MiniHS2(HiveConf hiveConf, MiniClusterType miniClusterType, boolean useM Path scratchDir = new Path(baseFsDir, "scratch"); // Create root scratchdir with write all, so that user impersonation has no issues. 
Utilities.createDirsWithPermission(hiveConf, scratchDir, WRITE_ALL_PERM, true); - System.setProperty(HiveConf.ConfVars.SCRATCHDIR.varname, scratchDir.toString()); - hiveConf.setVar(ConfVars.SCRATCHDIR, scratchDir.toString()); + System.setProperty(HiveConf.ConfVars.SCRATCH_DIR.varname, scratchDir.toString()); + hiveConf.setVar(ConfVars.SCRATCH_DIR, scratchDir.toString()); String localScratchDir = baseDir.getPath() + File.separator + "scratch"; - System.setProperty(HiveConf.ConfVars.LOCALSCRATCHDIR.varname, localScratchDir); - hiveConf.setVar(ConfVars.LOCALSCRATCHDIR, localScratchDir); + System.setProperty(HiveConf.ConfVars.LOCAL_SCRATCH_DIR.varname, localScratchDir); + hiveConf.setVar(ConfVars.LOCAL_SCRATCH_DIR, localScratchDir); } public MiniHS2(HiveConf hiveConf) throws Exception { diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java index f7f3dd65ddf4..94cd398a9d41 100644 --- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java +++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java @@ -192,11 +192,11 @@ private static void resolveMetadata(Properties props) throws HiveException, IOEx } private static String getMetastoreDatabaseType(HiveConf conf) { - return conf.getVar(HiveConf.ConfVars.METASTOREDBTYPE); + return conf.getVar(HiveConf.ConfVars.METASTORE_DB_TYPE); } private static String getMetastoreConnectionURL(HiveConf conf) { - return conf.getVar(HiveConf.ConfVars.METASTORECONNECTURLKEY); + return conf.getVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY); } private static String getMetastoreDriver(HiveConf conf) { @@ -209,6 +209,6 @@ private static String getMetastoreJdbcUser(HiveConf conf) { private static String getMetastoreJdbcPasswd(HiveConf conf) throws IOException { return ShimLoader.getHadoopShims().getPassword(conf, - HiveConf.ConfVars.METASTOREPWD.varname); + HiveConf.ConfVars.METASTORE_PWD.varname); } } diff --git a/kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaOutputFormat.java b/kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaOutputFormat.java index 1ddda8e6992c..19048c1da426 100644 --- a/kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaOutputFormat.java +++ b/kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaOutputFormat.java @@ -67,7 +67,7 @@ public class KafkaOutputFormat implements HiveOutputFormat(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java index 93b153886c99..89bff9678f1c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java @@ -385,7 +385,7 @@ private ValidTxnWriteIdList getTxnWriteIds(String txnString) throws LockExceptio private void setValidWriteIds(ValidTxnWriteIdList txnWriteIds) { driverContext.getConf().set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, txnWriteIds.toString()); if (driverContext.getPlan().getFetchTask() != null) { - // This is needed for {@link HiveConf.ConfVars.HIVEFETCHTASKCONVERSION} optimization which initializes JobConf + // This is needed for {@link HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION} optimization which initializes JobConf // in FetchOperator before recordValidTxns() but this has to be done after locks are acquired to avoid race // conditions in ACID. 
This case is supported only for single source query. Operator source = driverContext.getPlan().getFetchTask().getWork().getSource(); @@ -602,7 +602,7 @@ synchronized void endTransactionAndCleanup(boolean commit, HiveTxnManager txnMan private void commitOrRollback(boolean commit, HiveTxnManager txnManager) throws LockException { if (commit) { if (driverContext.getConf().getBoolVar(ConfVars.HIVE_IN_TEST) && - driverContext.getConf().getBoolVar(ConfVars.HIVETESTMODEROLLBACKTXN)) { + driverContext.getConf().getBoolVar(ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN)) { txnManager.rollbackTxn(); } else { txnManager.commitTxn(); //both commit & rollback clear ALL locks for this transaction diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Executor.java b/ql/src/java/org/apache/hadoop/hive/ql/Executor.java index 1e2140ed8025..708e3870efa4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Executor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Executor.java @@ -297,7 +297,7 @@ private void handleFinished() throws Exception { } private String getJobName() { - int maxlen = driverContext.getConf().getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); + int maxlen = driverContext.getConf().getIntVar(HiveConf.ConfVars.HIVE_JOBNAME_LENGTH); return Utilities.abbreviate(driverContext.getQueryString(), maxlen - 6); } @@ -322,7 +322,7 @@ private int getJobCount() { private void launchTasks(boolean noName, int jobCount, String jobName) throws HiveException { // Launch upto maxthreads tasks Task task; - int maxthreads = HiveConf.getIntVar(driverContext.getConf(), HiveConf.ConfVars.EXECPARALLETHREADNUMBER); + int maxthreads = HiveConf.getIntVar(driverContext.getConf(), HiveConf.ConfVars.EXEC_PARALLEL_THREAD_NUMBER); while ((task = taskQueue.getRunnable(maxthreads)) != null) { TaskRunner runner = launchTask(task, noName, jobName, jobCount); if (!runner.isRunning()) { @@ -346,7 +346,7 @@ private TaskRunner launchTask(Task task, boolean noName, String jobName, int TaskRunner taskRun = new TaskRunner(task, taskQueue); taskQueue.launching(taskRun); - if (HiveConf.getBoolVar(task.getConf(), HiveConf.ConfVars.EXECPARALLEL) && task.canExecuteInParallel()) { + if (HiveConf.getBoolVar(task.getConf(), HiveConf.ConfVars.EXEC_PARALLEL) && task.canExecuteInParallel()) { LOG.info("Starting task [" + task + "] in parallel"); taskRun.start(); } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java index b39037cd65e1..10025dbd9026 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java @@ -98,7 +98,7 @@ private QueryState(HiveConf conf) { // Get the query id stored in query specific config. 
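[Editorial aside -- illustration, not part of the patch.] The Executor hunk above pairs the renamed HIVE_JOBNAME_LENGTH with Utilities.abbreviate to shorten the job name; the same pairing appears again further down in AlterTableArchiveOperation. A standalone sketch of that read, using a made-up query string:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Utilities;

// Illustrative only: the Executor.getJobName() read under the renamed constant.
public class JobNameSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // HIVEJOBNAMELENGTH is now HIVE_JOBNAME_LENGTH; the call is otherwise unchanged.
    int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVE_JOBNAME_LENGTH);
    // The "- 6" mirrors Executor; presumably it reserves room for a suffix appended
    // elsewhere (an assumption -- the patch itself does not say why).
    String jobName = Utilities.abbreviate(
        "INSERT OVERWRITE TABLE target SELECT * FROM source WHERE ds = '2024-01-01'",
        maxlen - 6);
    System.out.println(jobName);
  }
}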
public String getQueryId() { - return queryConf.getVar(HiveConf.ConfVars.HIVEQUERYID); + return queryConf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID); } public String getQueryString() { @@ -172,15 +172,15 @@ public void setNumModifiedRows(long numModifiedRows) { } public String getQueryTag() { - return HiveConf.getVar(this.queryConf, HiveConf.ConfVars.HIVEQUERYTAG); + return HiveConf.getVar(this.queryConf, HiveConf.ConfVars.HIVE_QUERY_TAG); } public void setQueryTag(String queryTag) { - HiveConf.setVar(this.queryConf, HiveConf.ConfVars.HIVEQUERYTAG, queryTag); + HiveConf.setVar(this.queryConf, HiveConf.ConfVars.HIVE_QUERY_TAG, queryTag); } public static void setApplicationTag(HiveConf queryConf, String queryTag) { - String jobTag = HiveConf.getVar(queryConf, HiveConf.ConfVars.HIVEQUERYTAG); + String jobTag = HiveConf.getVar(queryConf, HiveConf.ConfVars.HIVE_QUERY_TAG); if (jobTag == null || jobTag.isEmpty()) { jobTag = queryTag; } else { @@ -327,13 +327,13 @@ public QueryState build() { // Generate the new queryId if needed if (generateNewQueryId) { String queryId = QueryPlan.makeQueryId(); - queryConf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId); + queryConf.setVar(HiveConf.ConfVars.HIVE_QUERY_ID, queryId); setApplicationTag(queryConf, queryId); // FIXME: druid storage handler relies on query.id to maintain some staging directories // expose queryid to session level if (hiveConf != null) { - hiveConf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId); + hiveConf.setVar(HiveConf.ConfVars.HIVE_QUERY_ID, queryId); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/LockDatabaseAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/LockDatabaseAnalyzer.java index fda2282cbfa5..cd0392dd066c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/LockDatabaseAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/LockDatabaseAnalyzer.java @@ -52,7 +52,7 @@ public void analyzeInternal(ASTNode root) throws SemanticException { outputs.add(new WriteEntity(getDatabase(databaseName), WriteType.DDL_NO_LOCK)); LockDatabaseDesc desc = - new LockDatabaseDesc(databaseName, mode, HiveConf.getVar(conf, ConfVars.HIVEQUERYID), ctx.getCmd()); + new LockDatabaseDesc(databaseName, mode, HiveConf.getVar(conf, ConfVars.HIVE_QUERY_ID), ctx.getCmd()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); ctx.setNeedLockMgr(true); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java index 17f9fec4d177..9e7688a5b29a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java @@ -105,7 +105,7 @@ protected void addInputsOutputsAlterTable(TableName tableName, Map partitionSpec) throws SemanticException { Set reservedPartitionValues = new HashSet<>(); // Partition can't have this name - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME)); + reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULT_PARTITION_NAME)); reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULT_ZOOKEEPER_PARTITION_NAME)); // Partition value can't end in this suffix reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL)); diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java index 0fd8785d1bc7..24deedf63b0c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java @@ -97,7 +97,7 @@ private void checkPartitionValues(Table tbl, int colIndex) throws HiveException try { List values = Warehouse.getPartValuesFromPartName(partName); String value = values.get(colIndex); - if (value.equals(context.getConf().getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) { + if (value.equals(context.getConf().getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME))) { continue; } Object convertedValue = converter.convert(value); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java index 40500f13ff4b..c0bffcebdb23 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java @@ -119,7 +119,7 @@ ExprNodeDesc getShowPartitionsFilter(Table table, ASTNode command) throws Semant } showFilter = replaceDefaultPartNameAndCastType(target, colTypes, - HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME)); + HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULT_PARTITION_NAME)); } } return showFilter; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AbstractAlterTableArchiveAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AbstractAlterTableArchiveAnalyzer.java index 4b793eb545f0..b419c8332354 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AbstractAlterTableArchiveAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AbstractAlterTableArchiveAnalyzer.java @@ -51,7 +51,7 @@ public AbstractAlterTableArchiveAnalyzer(QueryState queryState) throws SemanticE // the AST tree protected void analyzeCommand(TableName tableName, Map partSpec, ASTNode command) throws SemanticException { - if (!conf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED)) { + if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_ARCHIVE_ENABLED)) { throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveOperation.java index f54ea6a6a727..e218e590a24e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveOperation.java @@ -182,7 +182,7 @@ private Path createArchiveInTmpDir(Table table, PartSpecInfo partitionSpecInfo, context.getConsole().printInfo("Creating " + ARCHIVE_NAME + " for " + originalDir.toString() + " in " + tmpPath); context.getConsole().printInfo("Please wait... 
(this may take a while)"); try { - int maxJobNameLength = context.getConf().getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); + int maxJobNameLength = context.getConf().getIntVar(HiveConf.ConfVars.HIVE_JOBNAME_LENGTH); String jobName = String.format("Archiving %s@%s", table.getTableName(), partitionSpecInfo.getName()); jobName = Utilities.abbreviate(jobName, maxJobNameLength - 6); context.getConf().set(MRJobConfig.JOB_NAME, jobName); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/BinaryRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/BinaryRecordReader.java index f9db0175edae..c661d2de4f88 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/BinaryRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/BinaryRecordReader.java @@ -40,7 +40,7 @@ public class BinaryRecordReader implements RecordReader { public void initialize(InputStream in, Configuration conf, Properties tbl) throws IOException { this.in = in; - maxRecordLength = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVEBINARYRECORDMAX); + maxRecordLength = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_BINARY_RECORD_MAX); } public Writable createRow() throws IOException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java index e36aaa86cf78..b0b860e16d3f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java @@ -239,7 +239,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { this.hconf = hconf; heartbeatInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVESENDHEARTBEAT); + HiveConf.ConfVars.HIVE_SEND_HEARTBEAT); countAfterReport = 0; totalSz = 0; @@ -296,9 +296,9 @@ protected void initializeOp(Configuration hconf) throws HiveException { dummyObjVectors = new RowContainer[numAliases]; joinEmitInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVEJOINEMITINTERVAL); + HiveConf.ConfVars.HIVE_JOIN_EMIT_INTERVAL); joinCacheSize = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVEJOINCACHESIZE); + HiveConf.ConfVars.HIVE_JOIN_CACHE_SIZE); logEveryNRows = HiveConf.getLongVar(hconf, HiveConf.ConfVars.HIVE_LOG_N_RECORDS); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java index 0044a042bd7b..c677796de73a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java @@ -136,13 +136,13 @@ public void initializeOp(Configuration hconf) throws HiveException { int bucketSize; - int oldVar = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEMAPJOINBUCKETCACHESIZE); + int oldVar = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_MAPJOIN_BUCKET_CACHE_SIZE); shortcutUnmatchedRows = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_JOIN_SHORTCUT_UNMATCHED_ROWS); if (oldVar != 100) { bucketSize = oldVar; } else { - bucketSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVESMBJOINCACHEROWS); + bucketSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_SMBJOIN_CACHE_ROWS); } for (byte pos = 0; pos < order.length; pos++) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java index 0cd931391a3c..ea6dff1becfc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java @@ -275,7 +275,7 @@ public JSONObject getJSONPlan(PrintStream out, List> tasks, Task fetc return getJSONPlan( out, tasks, fetchTask, jsonOutput, isExtended, appendTaskType, cboInfo, cboPlan, optimizedSQL, - conf.getVar(ConfVars.HIVESTAGEIDREARRANGE)); + conf.getVar(ConfVars.HIVE_STAGE_ID_REARRANGE)); } public JSONObject getJSONPlan(PrintStream out, List> tasks, Task fetchTask, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index f47cbffca663..d7903747ae9c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -655,7 +655,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { outputClass = serializer.getSerializedClass(); destTablePath = conf.getDestPath(); isInsertOverwrite = conf.getInsertOverwrite(); - counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP); + counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); LOG.info("Using serializer : " + serializer + " and formatter : " + hiveOutputFormat + (isCompressed ? " with compression" : "")); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java index a3af21a39a99..2fb17e068af5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java @@ -60,9 +60,9 @@ protected void initializeOp(Configuration hconf) throws HiveException { super.initializeOp(hconf); try { heartbeatInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVESENDHEARTBEAT); + HiveConf.ConfVars.HIVE_SEND_HEARTBEAT); conditionEvaluator = ExprNodeEvaluatorFactory.get(conf.getPredicate(), hconf); - if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEEXPREVALUATIONCACHE)) { + if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_EXPR_EVALUATION_CACHE)) { conditionEvaluator = ExprNodeEvaluatorFactory.toCachedEval(conditionEvaluator); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java index f548afd52401..326c351c7382 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java @@ -206,7 +206,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { numRowsHashTbl = 0; heartbeatInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVESENDHEARTBEAT); + HiveConf.ConfVars.HIVE_SEND_HEARTBEAT); countAfterReport = 0; ObjectInspector rowInspector = inputObjInspectors[0]; @@ -367,7 +367,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { keyPositionsSize = new ArrayList(); aggrPositions = new List[aggregations.length]; groupbyMapAggrInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL); + HiveConf.ConfVars.HIVE_GROUPBY_MAP_INTERVAL); // compare every groupbyMapAggrInterval rows numRowsCompareHashAggr = groupbyMapAggrInterval; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java index ce7279c78ab6..fff5446daef8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java @@ -122,7 +122,7 @@ public HashTableSinkOperator(CompilationOpContext ctx, MapJoinOperator mjop) { @SuppressWarnings("unchecked") protected void initializeOp(Configuration hconf) throws HiveException { super.initializeOp(hconf); - boolean isSilent = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVESESSIONSILENT); + boolean isSilent = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_SESSION_SILENT); console = new LogHelper(LOG, isSilent); memoryExhaustionChecker = MemoryExhaustionCheckerFactory.getChecker(console, hconf, conf); emptyRowContainer.addRow(emptyObjectArray); @@ -172,7 +172,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { } mapJoinTables = new MapJoinPersistableTableContainer[tagLen]; mapJoinTableSerdes = new MapJoinTableContainerSerDe[tagLen]; - hashTableScale = HiveConf.getLongVar(hconf, HiveConf.ConfVars.HIVEHASHTABLESCALE); + hashTableScale = HiveConf.getLongVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_SCALE); if (hashTableScale <= 0) { hashTableScale = 1; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java index 41bee8d60f37..e5b60e7781ff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java @@ -68,7 +68,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { currCount = 0; isMap = hconf.getBoolean("mapred.task.is.map", true); - String queryId = HiveConf.getVar(getConfiguration(), HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(getConfiguration(), HiveConf.ConfVars.HIVE_QUERY_ID); this.runtimeCache = ObjectCacheFactory.getCache(getConfiguration(), queryId, false, true); // this can happen in HS2 while doing local fetch optimization, where LimitOperator is used @@ -168,14 +168,14 @@ public static String getLimitReachedKey(Configuration conf) { } public static boolean checkLimitReached(JobConf jobConf) { - String queryId = HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_QUERY_ID); String limitReachedKey = getLimitReachedKey(jobConf); return checkLimitReached(jobConf, queryId, limitReachedKey); } public static boolean checkLimitReachedForVertex(JobConf jobConf, String vertexName) { - String queryId = HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_QUERY_ID); return checkLimitReached(jobConf, queryId, vertexName + LIMIT_REACHED_KEY_SUFFIX); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java index 02352c400842..57b1786483ca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java @@ -177,7 +177,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { // On Tez only: The hash map might already be cached in the container we run // the task in. On MR: The cache is a no-op. - String queryId = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_QUERY_ID); // The cacheKey may have already been defined in the MapJoin conf spec // as part of the Shared Work Optimization if it can be reused among // multiple mapjoin operators. 
In that case, we take that key from conf diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 9bc6aa1b41f7..2721977d6f9b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Context.Operation; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLUtils; @@ -66,7 +65,6 @@ import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol; import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol; import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.LoadFileDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; @@ -834,7 +832,7 @@ private void checkFileFormats(Hive db, LoadTableDesc tbd, Table table) } // handle file format check for table level - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CHECK_FILEFORMAT)) { boolean flag = true; // work.checkFileFormat is set to true only for Load Task, so assumption here is // dynamic partition context is null diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java index c52ca1877363..5b6248108257 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java @@ -75,7 +75,7 @@ protected PTFPartition(Configuration cfg, this.inputOI = inputOI; this.outputOI = outputOI; if ( createElemContainer ) { - int containerNumRows = HiveConf.getIntVar(cfg, ConfVars.HIVEJOINCACHESIZE); + int containerNumRows = HiveConf.getIntVar(cfg, ConfVars.HIVE_JOIN_CACHE_SIZE); elems = new PTFRowContainer>(containerNumRows, cfg, null); elems.setSerDe(serDe, outputOI); elems.setTableDesc(PTFRowContainer.createTableDesc(inputOI)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java index 24bec956a69b..bad296e38505 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java @@ -135,9 +135,9 @@ public void writePartitionKeys(Path path, JobConf job) throws IOException { // random sampling public static FetchOperator createSampler(FetchWork work, JobConf job, Operator operator) throws HiveException { - int sampleNum = HiveConf.getIntVar(job, HiveConf.ConfVars.HIVESAMPLINGNUMBERFORORDERBY); + int sampleNum = HiveConf.getIntVar(job, HiveConf.ConfVars.HIVE_SAMPLING_NUMBER_FOR_ORDERBY); float samplePercent = - HiveConf.getFloatVar(job, HiveConf.ConfVars.HIVESAMPLINGPERCENTFORORDERBY); + HiveConf.getFloatVar(job, HiveConf.ConfVars.HIVE_SAMPLING_PERCENT_FOR_ORDERBY); if (samplePercent < 0.0 || samplePercent > 1.0) { throw new IllegalArgumentException("Percentile value must be within the range of 0 to 1."); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java index b3f167c3e98d..d119e688b710 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java @@ -132,13 +132,13 @@ protected void initializeOp(Configuration hconf) throws HiveException { int bucketSize; // For backwards compatibility reasons we honor the older - // HIVEMAPJOINBUCKETCACHESIZE if set different from default. + // HIVE_MAPJOIN_BUCKET_CACHE_SIZE if set different from default. // By hive 0.13 we should remove this code. - int oldVar = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEMAPJOINBUCKETCACHESIZE); + int oldVar = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_MAPJOIN_BUCKET_CACHE_SIZE); if (oldVar != 100) { bucketSize = oldVar; } else { - bucketSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVESMBJOINCACHEROWS); + bucketSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_SMBJOIN_CACHE_ROWS); } for (byte pos = 0; pos < order.length; pos++) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java index af22e5e95e88..2dd3e0f59522 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java @@ -149,8 +149,8 @@ boolean blackListed(Configuration conf, String name) { if (blackListedConfEntries == null) { blackListedConfEntries = new HashSet(); if (conf != null) { - String bl = conf.get(HiveConf.ConfVars.HIVESCRIPT_ENV_BLACKLIST.toString(), - HiveConf.ConfVars.HIVESCRIPT_ENV_BLACKLIST.getDefaultValue()); + String bl = conf.get(HiveConf.ConfVars.HIVE_SCRIPT_ENV_BLACKLIST.toString(), + HiveConf.ConfVars.HIVE_SCRIPT_ENV_BLACKLIST.getDefaultValue()); if (bl != null && !bl.isEmpty()) { String[] bls = bl.split(","); Collections.addAll(blackListedConfEntries, bls); @@ -175,7 +175,7 @@ void addJobConfToEnvironment(Configuration conf, Map env) { String value = conf.get(name); // does variable expansion name = safeEnvVarName(name); boolean truncate = conf - .getBoolean(HiveConf.ConfVars.HIVESCRIPTTRUNCATEENV.toString(), false); + .getBoolean(HiveConf.ConfVars.HIVE_SCRIPT_TRUNCATE_ENV.toString(), false); value = safeEnvVarValue(value, name, truncate); env.put(name, value); } @@ -290,12 +290,12 @@ boolean isBrokenPipeException(IOException e) { } boolean allowPartialConsumption() { - return HiveConf.getBoolVar(hconf, HiveConf.ConfVars.ALLOWPARTIALCONSUMP); + return HiveConf.getBoolVar(hconf, HiveConf.ConfVars.ALLOW_PARTIAL_CONSUMP); } void displayBrokenPipeInfo() { LOG.info("The script did not consume all input data. 
This is considered as an error."); - LOG.info("set " + HiveConf.ConfVars.ALLOWPARTIALCONSUMP.toString() + "=true; to ignore it."); + LOG.info("set " + HiveConf.ConfVars.ALLOW_PARTIAL_CONSUMP.toString() + "=true; to ignore it."); return; } @@ -339,13 +339,13 @@ public void process(Object row, int tag) throws HiveException { ProcessBuilder pb = new ProcessBuilder(wrappedCmdArgs); Map env = pb.environment(); addJobConfToEnvironment(hconf, env); - env.put(safeEnvVarName(HiveConf.ConfVars.HIVEALIAS.varname), String + env.put(safeEnvVarName(HiveConf.ConfVars.HIVE_ALIAS.varname), String .valueOf(alias)); // Create an environment variable that uniquely identifies this script // operator String idEnvVarName = HiveConf.getVar(hconf, - HiveConf.ConfVars.HIVESCRIPTIDENVVAR); + HiveConf.ConfVars.HIVE_SCRIPT_ID_ENV_VAR); String idEnvVarVal = getOperatorId(); env.put(safeEnvVarName(idEnvVarName), idEnvVarVal); @@ -376,11 +376,11 @@ public void process(Object row, int tag) throws HiveException { .getProperties()); errThread = new StreamThread(scriptErrReader, new ErrorStreamProcessor( - HiveConf.getIntVar(hconf, HiveConf.ConfVars.SCRIPTERRORLIMIT)), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.SCRIPT_ERROR_LIMIT)), "ErrorProcessor"); if (HiveConf - .getBoolVar(hconf, HiveConf.ConfVars.HIVESCRIPTAUTOPROGRESS)) { + .getBoolVar(hconf, HiveConf.ConfVars.HIVE_SCRIPT_AUTO_PROGRESS)) { autoProgressor = new AutoProgressor(this.getClass().getName(), reporter, Utilities.getDefaultNotificationInterval(hconf), HiveConf.getTimeVar( @@ -574,7 +574,7 @@ class CounterStatusProcessor { private final Reporter reporter; CounterStatusProcessor(Configuration hconf, Reporter reporter){ - this.reporterPrefix = HiveConf.getVar(hconf, HiveConf.ConfVars.STREAMREPORTERPERFIX); + this.reporterPrefix = HiveConf.getVar(hconf, HiveConf.ConfVars.STREAM_REPORTER_PREFIX); this.counterPrefix = reporterPrefix + "counter:"; this.statusPrefix = reporterPrefix + "status:"; this.reporter = reporter; @@ -625,7 +625,7 @@ class ErrorStreamProcessor implements StreamProcessor { public ErrorStreamProcessor(int maxBytes) { this.maxBytes = maxBytes; lastReportTime = 0; - if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.STREAMREPORTERENABLED)){ + if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.STREAM_REPORTER_ENABLED)){ counterStatus = new CounterStatusProcessor(hconf, reporter); } } @@ -732,7 +732,7 @@ public void run() { * Wrap the script in a wrapper that allows admins to control. 
*/ protected String[] addWrapper(String[] inArgs) { - String wrapper = HiveConf.getVar(hconf, HiveConf.ConfVars.SCRIPTWRAPPER); + String wrapper = HiveConf.getVar(hconf, HiveConf.ConfVars.SCRIPT_WRAPPER); if (wrapper == null) { return inArgs; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java index ffaa252a25dc..10adb81fe050 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java @@ -66,7 +66,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { assert (colList.get(i) != null); eval[i] = ExprNodeEvaluatorFactory.get(colList.get(i), hconf); } - if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEEXPREVALUATIONCACHE)) { + if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_EXPR_EVALUATION_CACHE)) { eval = ExprNodeEvaluatorFactory.toCachedEvals(eval); } output = new Object[eval.length]; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java index 843686bb30ad..5b1a7a7c2a1b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java @@ -320,7 +320,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { jc = new JobConf(hconf); } - defaultPartitionName = HiveConf.getVar(hconf, HiveConf.ConfVars.DEFAULTPARTITIONNAME); + defaultPartitionName = HiveConf.getVar(hconf, HiveConf.ConfVars.DEFAULT_PARTITION_NAME); currentStat = null; stats = new HashMap(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java index b227a70a147c..df43e9608900 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java @@ -60,7 +60,7 @@ public int next(Writable row) throws IOException { int bytesConsumed = lineReader.readLine((Text) row); - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESCRIPTESCAPE)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SCRIPT_ESCAPE)) { return HiveUtils.unescapeText((Text) row); } return bytesConsumed; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java index acbf46678f6a..99c03fa2bd58 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java @@ -46,7 +46,7 @@ public void write(Writable row) throws IOException { Text text = (Text) row; Text escapeText = text; - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESCRIPTESCAPE)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SCRIPT_ESCAPE)) { escapeText = HiveUtils.escapeText(text); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java index c58ed4e564d8..506266c829a7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java @@ -94,7 +94,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { // Set up periodic progress reporting in case the UDTF doesn't output rows // for a while - if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEUDTFAUTOPROGRESS)) { + if 
(HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_UDTF_AUTO_PROGRESS)) { autoProgressor = new AutoProgressor(this.getClass().getName(), reporter, Utilities.getDefaultNotificationInterval(hconf), HiveConf.getTimeVar( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 470d052d8982..6d38f1d5afca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -2939,12 +2939,12 @@ public static Map getFullDPSpecs(Configuration conf, Dyn private static void validateDynPartitionCount(Configuration conf, Collection partitions) throws HiveException { int partsToLoad = partitions.size(); - int maxPartition = HiveConf.getIntVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS); + int maxPartition = HiveConf.getIntVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS); if (partsToLoad > maxPartition) { throw new HiveException("Number of dynamic partitions created is " + partsToLoad + ", which is more than " + maxPartition - +". To solve this try to set " + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname + +". To solve this try to set " + HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS.varname + " to at least " + partsToLoad + '.'); } } @@ -3356,8 +3356,8 @@ public static String formatMsecToStr(long msec) { */ public static int estimateNumberOfReducers(HiveConf conf, ContentSummary inputSummary, MapWork work, boolean finalMapRed) throws IOException { - long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); - int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); + long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER); + int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); double samplePercentage = getHighestSamplePercentage(work); long totalInputFileSize = getTotalInputFileSize(inputSummary, work, samplePercentage); @@ -3809,7 +3809,7 @@ public static void setInputPaths(JobConf job, List pathsToAdd) { */ public static void setInputAttributes(Configuration conf, MapWork mWork) { HiveConf.ConfVars var = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") ? 
- HiveConf.ConfVars.HIVETEZINPUTFORMAT : HiveConf.ConfVars.HIVEINPUTFORMAT; + HiveConf.ConfVars.HIVE_TEZ_INPUT_FORMAT : HiveConf.ConfVars.HIVE_INPUT_FORMAT; if (mWork.getInputformat() != null) { HiveConf.setVar(conf, var, mWork.getInputformat()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/MapAggrMemErrorHeuristic.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/MapAggrMemErrorHeuristic.java index aa6452e49103..da5bd837e0da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/MapAggrMemErrorHeuristic.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/MapAggrMemErrorHeuristic.java @@ -46,7 +46,7 @@ public MapAggrMemErrorHeuristic() { @Override public void init(String query, JobConf conf) { super.init(query, conf); - configMatches = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE); + configMatches = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MAPSIDE_AGGREGATE); } @Override @@ -56,9 +56,9 @@ public ErrorAndSolution getErrorAndSolution() { List matchingLines = getRegexToLogLines().get(OUT_OF_MEMORY_REGEX); if (matchingLines.size() > 0) { - String confName = HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY.toString(); + String confName = HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY.toString(); float confValue = HiveConf.getFloatVar(getConf(), - HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); es = new ErrorAndSolution( "Out of memory due to hash maps used in map-side aggregation.", diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java index ce12cea66eda..e19b73dabcdf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java @@ -208,7 +208,7 @@ public ExecDriver(MapredWork plan, JobConf job, boolean isSilent) throws HiveExc @Override public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) { Counters.Counter cntr = ctrs.findCounter( - HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP), + HiveConf.getVar(job, HiveConf.ConfVars.HIVE_COUNTER_GROUP), Operator.HIVE_COUNTER_FATAL); return cntr != null && cntr.getValue() > 0; } @@ -264,7 +264,7 @@ public int execute() { job.setMapOutputValueClass(BytesWritable.class); try { - String partitioner = HiveConf.getVar(job, ConfVars.HIVEPARTITIONER); + String partitioner = HiveConf.getVar(job, ConfVars.HIVE_PARTITIONER); job.setPartitionerClass(JavaUtils.loadClass(partitioner)); } catch (ClassNotFoundException e) { throw new RuntimeException(e.getMessage(), e); @@ -282,7 +282,7 @@ public int execute() { job.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false); job.setBoolean(MRJobConfig.MAP_SPECULATIVE, false); - String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT); + String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVE_INPUT_FORMAT); if (mWork.isUseBucketizedHiveInputFormat()) { inpFormat = BucketizedHiveInputFormat.class.getName(); @@ -495,19 +495,19 @@ public static void propagateSplitSettings(JobConf job, MapWork work) { } if (work.getMaxSplitSize() != null) { - HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, work.getMaxSplitSize()); + HiveConf.setLongVar(job, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, work.getMaxSplitSize()); } if (work.getMinSplitSize() != null) { - HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize()); + HiveConf.setLongVar(job, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 
work.getMinSplitSize()); } if (work.getMinSplitSizePerNode() != null) { - HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERNODE, work.getMinSplitSizePerNode()); + HiveConf.setLongVar(job, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE_PER_NODE, work.getMinSplitSizePerNode()); } if (work.getMinSplitSizePerRack() != null) { - HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERRACK, work.getMinSplitSizePerRack()); + HiveConf.setLongVar(job, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE_PER_RACK, work.getMinSplitSizePerRack()); } } @@ -572,7 +572,7 @@ private void handleSampling(Context context, MapWork mWork, JobConf job) protected void setInputAttributes(Configuration conf) { MapWork mWork = work.getMapWork(); if (mWork.getInputformat() != null) { - HiveConf.setVar(conf, ConfVars.HIVEINPUTFORMAT, mWork.getInputformat()); + HiveConf.setVar(conf, ConfVars.HIVE_INPUT_FORMAT, mWork.getInputformat()); } // Intentionally overwrites anything the user may have put here conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted()); @@ -692,14 +692,14 @@ public static void main(String[] args) throws IOException, HiveException { } } - boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT); + boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SESSION_SILENT); - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID, "").trim(); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, "").trim(); if(queryId.isEmpty()) { queryId = "unknown-" + System.currentTimeMillis(); - HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYID, queryId); + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, queryId); } - System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId); + System.setProperty(HiveConf.ConfVars.HIVE_QUERY_ID.toString(), queryId); LogUtils.registerLoggingContext(conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java index a5beb633bcb9..f7a658ea9242 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java @@ -236,7 +236,7 @@ public static class ReportStats implements Operator.OperatorFunc { public ReportStats(Reporter rp, Configuration conf) { this.rp = rp; - this.groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP); + this.groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java index c365d41a8204..62d6e40d02c1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.exec.mr; import java.io.IOException; -import java.io.Serializable; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Calendar; @@ -143,7 +142,7 @@ public void setJobId(JobID jobId) { public HadoopJobExecHelper(JobConf job, LogHelper console, Task task, HadoopJobExecHook hookCallBack) { - this.queryId = HiveConf.getVar(job, HiveConf.ConfVars.HIVEQUERYID, "unknown-" + System.currentTimeMillis()); + this.queryId = HiveConf.getVar(job, HiveConf.ConfVars.HIVE_QUERY_ID, "unknown-" + System.currentTimeMillis()); this.job = job; this.console = console; this.task = task; @@ -205,10 
+204,10 @@ public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) { return false; } // check for number of created files - Counters.Counter cntr = ctrs.findCounter(HiveConf.getVar(job, ConfVars.HIVECOUNTERGROUP), + Counters.Counter cntr = ctrs.findCounter(HiveConf.getVar(job, ConfVars.HIVE_COUNTER_GROUP), Operator.HIVE_COUNTER_CREATED_FILES); long numFiles = cntr != null ? cntr.getValue() : 0; - long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAXCREATEDFILES); + long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAX_CREATED_FILES); if (numFiles > upperLimit) { errMsg.append("total number of created files now is " + numFiles + ", which exceeds ").append(upperLimit); return true; @@ -226,7 +225,7 @@ private MapRedStats progress(ExecDriverTaskHandle th) throws IOException, LockEx job, HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL, TimeUnit.MILLISECONDS); boolean fatal = false; StringBuilder errMsg = new StringBuilder(); - long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL); + long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVE_COUNTERS_PULL_INTERVAL); boolean initializing = true; boolean initOutputPrinted = false; long cpuMsec = -1; @@ -437,7 +436,7 @@ private MapRedStats progress(ExecDriverTaskHandle th) throws IOException, LockEx //Set the number of table rows affected in mapRedStats to display number of rows inserted. if (ctrs != null) { Counter counter = ctrs.findCounter( - ss.getConf().getVar(HiveConf.ConfVars.HIVECOUNTERGROUP), + ss.getConf().getVar(HiveConf.ConfVars.HIVE_COUNTER_GROUP), FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN); if (counter != null) { mapRedStats.setNumModifiedRows(counter.getValue()); @@ -474,7 +473,7 @@ public void jobInfo(RunningJob rj) { } console.printInfo(getJobStartMsg(rj.getID()) + ", Tracking URL = " + rj.getTrackingURL()); - console.printInfo("Kill Command = " + HiveConf.getVar(job, ConfVars.MAPREDBIN) + console.printInfo("Kill Command = " + HiveConf.getVar(job, ConfVars.MAPRED_BIN) + " job -kill " + rj.getID()); } } @@ -557,14 +556,14 @@ public int progress(RunningJob rj, JobClient jc, Context ctx) throws IOException // remove the pwd from conf file so that job tracker doesn't show this // logs - String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD); + String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTORE_PWD); if (pwd != null) { - HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE"); + HiveConf.setVar(job, HiveConf.ConfVars.METASTORE_PWD, "HIVE"); } // replace it back if (pwd != null) { - HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, pwd); + HiveConf.setVar(job, HiveConf.ConfVars.METASTORE_PWD, pwd); } // add to list of running jobs to kill in case of abnormal shutdown @@ -654,7 +653,7 @@ private Map extractAllCounterValues(Counters counters) { private List getClientStatPublishers() { List clientStatsPublishers = new ArrayList(); - String confString = HiveConf.getVar(job, HiveConf.ConfVars.CLIENTSTATSPUBLISHERS); + String confString = HiveConf.getVar(job, HiveConf.ConfVars.CLIENT_STATS_PUBLISHERS); confString = confString.trim(); if (confString.equals("")) { return clientStatsPublishers; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java index 386358493650..6c8b9ff00d7c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java @@ -107,7 +107,7 @@ 
public int execute() { // auto-determine local mode if allowed if (!ctx.isLocalOnlyExecutionMode() && - conf.getBoolVar(HiveConf.ConfVars.LOCALMODEAUTO)) { + conf.getBoolVar(HiveConf.ConfVars.LOCAL_MODE_AUTO)) { if (inputSummary == null) { inputSummary = Utilities.getInputSummary(ctx, work.getMapWork(), null); @@ -142,7 +142,7 @@ public int execute() { } } - runningViaChild = conf.getBoolVar(HiveConf.ConfVars.SUBMITVIACHILD); + runningViaChild = conf.getBoolVar(HiveConf.ConfVars.SUBMIT_VIA_CHILD); if (!runningViaChild) { // since we are running the mapred task in the same jvm, we should update the job conf @@ -172,7 +172,7 @@ public int execute() { super.setInputAttributes(conf); // enable assertion - String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOPBIN); + String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOP_BIN); String hiveJar = conf.getJar(); String libJars = super.getResource(conf, ResourceType.JAR); @@ -247,7 +247,7 @@ public int execute() { // if we are running in local mode - then the amount of memory used // by the child jvm can no longer default to the memory used by the // parent jvm - int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM); + int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVE_HADOOP_MAX_MEM); if (hadoopMem == 0) { // remove env var that would default child jvm to use parent's memory // as default. child jvm would use default memory for a hadoop client @@ -453,13 +453,13 @@ private void setNumberOfReducers() throws IOException { } console .printInfo("In order to change the average load for a reducer (in bytes):"); - console.printInfo(" set " + HiveConf.ConfVars.BYTESPERREDUCER.varname + console.printInfo(" set " + HiveConf.ConfVars.BYTES_PER_REDUCER.varname + "="); console.printInfo("In order to limit the maximum number of reducers:"); - console.printInfo(" set " + HiveConf.ConfVars.MAXREDUCERS.varname + console.printInfo(" set " + HiveConf.ConfVars.MAX_REDUCERS.varname + "="); console.printInfo("In order to set a constant number of reducers:"); - console.printInfo(" set " + HiveConf.ConfVars.HADOOPNUMREDUCERS + console.printInfo(" set " + HiveConf.ConfVars.HADOOP_NUM_REDUCERS + "="); } } @@ -478,13 +478,13 @@ public static String isEligibleForLocalMode(HiveConf conf, long inputLength, long inputFileCount) { - long maxBytes = conf.getLongVar(HiveConf.ConfVars.LOCALMODEMAXBYTES); - long maxInputFiles = conf.getIntVar(HiveConf.ConfVars.LOCALMODEMAXINPUTFILES); + long maxBytes = conf.getLongVar(HiveConf.ConfVars.LOCAL_MODE_MAX_BYTES); + long maxInputFiles = conf.getIntVar(HiveConf.ConfVars.LOCAL_MODE_MAX_INPUT_FILES); // check for max input size if (inputLength > maxBytes) { return "Input Size (= " + inputLength + ") is larger than " + - HiveConf.ConfVars.LOCALMODEMAXBYTES.varname + " (= " + maxBytes + ")"; + HiveConf.ConfVars.LOCAL_MODE_MAX_BYTES.varname + " (= " + maxBytes + ")"; } // ideally we would like to do this check based on the number of splits @@ -494,7 +494,7 @@ public static String isEligibleForLocalMode(HiveConf conf, if (inputFileCount > maxInputFiles) { return "Number of Input Files (= " + inputFileCount + ") is larger than " + - HiveConf.ConfVars.LOCALMODEMAXINPUTFILES.varname + "(= " + maxInputFiles + ")"; + HiveConf.ConfVars.LOCAL_MODE_MAX_INPUT_FILES.varname + "(= " + maxInputFiles + ")"; } // since local mode only runs with 1 reducers - make sure that the diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java index 
62b74dc8425d..0a781a825809 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java @@ -152,7 +152,7 @@ public boolean requireLock() { @Override public int execute() { - if (conf.getBoolVar(HiveConf.ConfVars.SUBMITLOCALTASKVIACHILD)) { + if (conf.getBoolVar(HiveConf.ConfVars.SUBMIT_LOCAL_TASK_VIA_CHILD)) { // send task off to another jvm return executeInChildVM(); } else { @@ -167,8 +167,8 @@ private int executeInChildVM() { // generate the cmd line to run in the child jvm String hiveJar = conf.getJar(); - String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOPBIN); - conf.setVar(ConfVars.HIVEADDEDJARS, Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR)); + String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOP_BIN); + conf.setVar(ConfVars.HIVE_ADDED_JARS, Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR)); // write out the plan to a local file Path planPath = new Path(context.getLocalTmpPath(), "plan.xml"); MapredLocalWork plan = getWork(); @@ -235,8 +235,8 @@ private int executeInChildVM() { // if we are running in local mode - then the amount of memory used // by the child jvm can no longer default to the memory used by the // parent jvm - // int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM); - int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM); + // int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVE_HADOOP_MAX_MEM); + int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVE_HADOOP_MAX_MEM); if (hadoopMem == 0) { // remove env var that would default child jvm to use parent's memory // as default. child jvm would use default memory for a hadoop client diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java index 92a3df4fc8a7..9e116e0e243f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java @@ -624,8 +624,8 @@ private static void validateCapacity(long capacity) { if (capacity > Integer.MAX_VALUE) { throw new RuntimeException("Attempting to expand the hash table to " + capacity + " that overflows maximum array size. 
For this query, you may want to disable " - + ConfVars.HIVEDYNAMICPARTITIONHASHJOIN.varname + " or reduce " - + ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD.varname); + + ConfVars.HIVE_DYNAMIC_PARTITION_HASHJOIN.varname + " or reduce " + + ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD.varname); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java index ae84d2d6f06d..452ef5991e86 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; import org.apache.hadoop.hive.ql.exec.JoinUtil; -import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.NonMatchedSmallTableIterator; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; import org.apache.hadoop.hive.ql.exec.vector.wrapper.VectorHashKeyWrapperBase; import org.apache.hadoop.hive.ql.exec.vector.wrapper.VectorHashKeyWrapperBatch; @@ -72,15 +71,15 @@ public HashMapWrapper(Map metaData) { } public HashMapWrapper() { - this(HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT.defaultFloatVal, - HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD.defaultIntVal, - HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR.defaultFloatVal, -1); + this(HiveConf.ConfVars.HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT.defaultFloatVal, + HiveConf.ConfVars.HIVE_HASHTABLE_THRESHOLD.defaultIntVal, + HiveConf.ConfVars.HIVE_HASHTABLE_LOAD_FACTOR.defaultFloatVal, -1); } public HashMapWrapper(Configuration hconf, long keyCount) { - this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD), - HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR), keyCount); + this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_THRESHOLD), + HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_LOAD_FACTOR), keyCount); } private HashMapWrapper(float keyCountAdj, int threshold, float loadFactor, long keyCount) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java index e66977f758a8..d82c43df84c7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinBytesTableContainer.KeyValueHelper; -import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.NonMatchedSmallTableIterator; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; import org.apache.hadoop.hive.ql.exec.vector.rowbytescontainer.VectorRowBytesContainer; import org.apache.hadoop.hive.ql.exec.vector.wrapper.VectorHashKeyWrapperBase; @@ -279,15 +278,15 @@ public int size() { public HybridHashTableContainer(Configuration hconf, long keyCount, long memoryAvailable, long estimatedTableSize, HybridHashTableConf nwayConf) throws SerDeException, IOException { - 
this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD), - HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS), - HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT), - HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINBLOOMFILTER), + this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_THRESHOLD), + HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_LOAD_FACTOR), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MEMCHECK_FREQ), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_WB_SIZE), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_WB_SIZE), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_NUM_PARTITIONS), + HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_MAPJOIN_OPTIMIZED_TABLE_PROBE_PERCENT), + HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_BLOOMFILTER), estimatedTableSize, keyCount, memoryAvailable, nwayConf, HiveUtils.getLocalDirList(hconf)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java index 79695975ef26..ac3570900775 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; import org.apache.hadoop.hive.ql.exec.JoinUtil; -import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.NonMatchedSmallTableIterator; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; import org.apache.hadoop.hive.ql.exec.vector.wrapper.VectorHashKeyWrapperBase; import org.apache.hadoop.hive.ql.exec.vector.wrapper.VectorHashKeyWrapperBatch; @@ -104,10 +103,10 @@ public class MapJoinBytesTableContainer public MapJoinBytesTableContainer(Configuration hconf, MapJoinObjectSerDeContext valCtx, long keyCount, long memUsage) throws SerDeException { - this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD), - HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE), + this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_THRESHOLD), + HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_LOAD_FACTOR), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_WB_SIZE), valCtx, keyCount, memUsage); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java index 8ee54fe8a1d8..4100bbccab5d 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java @@ -252,7 +252,7 @@ public int execute() { DumpMetaData dmd = new DumpMetaData(hiveDumpRoot, conf); // Initialize ReplChangeManager instance since we will require it to encode file URI. ReplChangeManager.getInstance(conf); - Path cmRoot = new Path(conf.getVar(HiveConf.ConfVars.REPLCMDIR)); + Path cmRoot = new Path(conf.getVar(HiveConf.ConfVars.REPL_CM_DIR)); Long lastReplId; LOG.info("Data copy at load enabled : {}", conf.getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET)); if (isFailoverTarget) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index 6ce83ee3e700..690e9a298a0c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -92,7 +92,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.LinkedList; @@ -436,8 +435,8 @@ a database ( directory ) } private boolean isReadOnlyHookRegistered() { - return conf.get(HiveConf.ConfVars.PREEXECHOOKS.varname) != null && - conf.get(HiveConf.ConfVars.PREEXECHOOKS.varname).contains(READ_ONLY_HOOK); + return conf.get(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname) != null && + conf.get(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname).contains(READ_ONLY_HOOK); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java index 7383d018ece7..cf4c5a57c7a8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java @@ -41,7 +41,7 @@ public class PathInfo { public PathInfo(HiveConf hiveConf) { this.hiveConf = hiveConf; - stagingDir = HiveConf.getVar(hiveConf, HiveConf.ConfVars.STAGINGDIR); + stagingDir = HiveConf.getVar(hiveConf, HiveConf.ConfVars.STAGING_DIR); } public Map getFsScratchDirs() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java index 61516a8604da..2c42c9b6156f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java @@ -70,7 +70,6 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector; -import org.apache.hadoop.hive.ql.parse.repl.load.metric.PreOptimizedBootstrapLoadMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; @@ -480,7 +479,7 @@ public static boolean failedWithNonRecoverableError(Path dumpRoot, HiveConf conf } public static Path getEncodedDumpRootPath(HiveConf conf, String dbname) throws UnsupportedEncodingException { - return new Path(conf.getVar(HiveConf.ConfVars.REPLDIR), + return new Path(conf.getVar(HiveConf.ConfVars.REPL_DIR), 
Base64.getEncoder().encodeToString(dbname .getBytes(StandardCharsets.UTF_8.name()))); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java index b9be333761e6..896be0018d79 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java @@ -372,28 +372,28 @@ private JobConf initializeVertexConf(JobConf baseConf, Context context, MapWork } if (mapWork.getMaxSplitSize() != null) { - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, mapWork.getMaxSplitSize().longValue()); } if (mapWork.getMinSplitSize() != null) { - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, mapWork.getMinSplitSize().longValue()); } if (mapWork.getMinSplitSizePerNode() != null) { - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERNODE, + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE_PER_NODE, mapWork.getMinSplitSizePerNode().longValue()); } if (mapWork.getMinSplitSizePerRack() != null) { - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERRACK, + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE_PER_RACK, mapWork.getMinSplitSizePerRack().longValue()); } Utilities.setInputAttributes(conf, mapWork); - String inpFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZINPUTFORMAT); + String inpFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_TEZ_INPUT_FORMAT); if (mapWork.isUseBucketizedHiveInputFormat()) { inpFormat = BucketizedHiveInputFormat.class.getName(); @@ -665,10 +665,10 @@ private Map createPartitionerConf(String partitionerClassName, * container size isn't set. */ public static Resource getContainerResource(Configuration conf) { - int memorySizeMb = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE); + int memorySizeMb = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TEZ_CONTAINER_SIZE); if (memorySizeMb <= 0) { LOG.warn("No Tez container size specified by {}. Falling back to MapReduce container MB {}", - HiveConf.ConfVars.HIVETEZCONTAINERSIZE, MRJobConfig.MAP_MEMORY_MB); + HiveConf.ConfVars.HIVE_TEZ_CONTAINER_SIZE, MRJobConfig.MAP_MEMORY_MB); memorySizeMb = conf.getInt(MRJobConfig.MAP_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB); // When config is explicitly set to "-1" defaultValue does not work! if (memorySizeMb <= 0) { @@ -676,10 +676,10 @@ public static Resource getContainerResource(Configuration conf) { memorySizeMb = MRJobConfig.DEFAULT_MAP_MEMORY_MB; } } - int cpuCores = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCPUVCORES); + int cpuCores = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TEZ_CPU_VCORES); if (cpuCores <= 0) { LOG.warn("No Tez VCore size specified by {}. 
Falling back to MapReduce container VCores {}", - HiveConf.ConfVars.HIVETEZCPUVCORES, MRJobConfig.MAP_CPU_VCORES); + HiveConf.ConfVars.HIVE_TEZ_CPU_VCORES, MRJobConfig.MAP_CPU_VCORES); cpuCores = conf.getInt(MRJobConfig.MAP_CPU_VCORES, MRJobConfig.DEFAULT_MAP_CPU_VCORES); if (cpuCores <= 0) { LOG.warn("Falling back to default container VCores {}", MRJobConfig.DEFAULT_MAP_CPU_VCORES); @@ -705,9 +705,9 @@ Map getContainerEnvironment(Configuration conf, boolean isMap) { * are set */ private static String getContainerJavaOpts(Configuration conf) { - String javaOpts = HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZJAVAOPTS); + String javaOpts = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_TEZ_JAVA_OPTS); - String logLevel = HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZLOGLEVEL); + String logLevel = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_TEZ_LOG_LEVEL); List logProps = Lists.newArrayList(); TezUtils.addLog4jSystemProperties(logLevel, logProps); StringBuilder sb = new StringBuilder(); @@ -716,7 +716,7 @@ private static String getContainerJavaOpts(Configuration conf) { } logLevel = sb.toString(); - if (HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) > 0) { + if (HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TEZ_CONTAINER_SIZE) > 0) { if (javaOpts != null) { return javaOpts + " " + logLevel; } else { @@ -724,8 +724,8 @@ private static String getContainerJavaOpts(Configuration conf) { } } else { if (javaOpts != null && !javaOpts.isEmpty()) { - LOG.warn(HiveConf.ConfVars.HIVETEZJAVAOPTS + " will be ignored because " - + HiveConf.ConfVars.HIVETEZCONTAINERSIZE + " is not set!"); + LOG.warn(HiveConf.ConfVars.HIVE_TEZ_JAVA_OPTS + " will be ignored because " + + HiveConf.ConfVars.HIVE_TEZ_CONTAINER_SIZE + " is not set!"); } return logLevel + " " + MRHelpers.getJavaOptsForMRMapper(conf); } @@ -1100,7 +1100,7 @@ public List localizeTempFilesFromConf( String hdfsDirPathStr, Configuration conf) throws IOException, LoginException { List tmpResources = new ArrayList(); - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEADDFILESUSEHDFSLOCATION)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ADD_FILES_USE_HDFS_LOCATION)) { // reference HDFS based resource directly, to use distribute cache efficiently. addHdfsResource(conf, tmpResources, LocalResourceType.FILE, getHdfsTempFilesFromConf(conf)); // local resources are session based. @@ -1146,7 +1146,7 @@ private static String[] getHdfsTempFilesFromConf(Configuration conf) { private static String[] getLocalTempFilesFromConf(Configuration conf) { String addedFiles = Utilities.getLocalResourceFiles(conf, SessionState.ResourceType.FILE); String addedJars = Utilities.getLocalResourceFiles(conf, SessionState.ResourceType.JAR); - String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS); + String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_AUX_JARS); String reloadableAuxJars = SessionState.get() == null ? 
null : SessionState.get().getReloadableAuxJars(); String allFiles = HiveStringUtils.joinIgnoringEmpty(new String[]{auxJars, reloadableAuxJars, addedJars, addedFiles}, ','); @@ -1159,13 +1159,13 @@ public static String[] getTempFilesFromConf(Configuration conf) { } String addedFiles = Utilities.getResourceFiles(conf, SessionState.ResourceType.FILE); if (StringUtils.isNotBlank(addedFiles)) { - HiveConf.setVar(conf, ConfVars.HIVEADDEDFILES, addedFiles); + HiveConf.setVar(conf, ConfVars.HIVE_ADDED_FILES, addedFiles); } String addedJars = Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR); if (StringUtils.isNotBlank(addedJars)) { - HiveConf.setVar(conf, ConfVars.HIVEADDEDJARS, addedJars); + HiveConf.setVar(conf, ConfVars.HIVE_ADDED_JARS, addedJars); } - String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS); + String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_AUX_JARS); String reloadableAuxJars = SessionState.get() == null ? null : SessionState.get().getReloadableAuxJars(); // need to localize the additional jars and files @@ -1178,7 +1178,7 @@ public static String[] getTempFilesFromConf(Configuration conf) { private static String[] getTempArchivesFromConf(Configuration conf) { String addedArchives = Utilities.getResourceFiles(conf, SessionState.ResourceType.ARCHIVE); if (StringUtils.isNotBlank(addedArchives)) { - HiveConf.setVar(conf, ConfVars.HIVEADDEDARCHIVES, addedArchives); + HiveConf.setVar(conf, ConfVars.HIVE_ADDED_ARCHIVES, addedArchives); return addedArchives.split(","); } return new String[0]; @@ -1272,7 +1272,7 @@ public String getExecJarPathLocal(Configuration configuration) { } } catch (Exception ignored) {} //Fall back to hive config, if the uri could not get, or it does not point to a .jar file - String jar = configuration.get(ConfVars.HIVEJAR.varname); + String jar = configuration.get(ConfVars.HIVE_JAR.varname); if (!StringUtils.isBlank(jar)) { return jar; } @@ -1466,7 +1466,7 @@ public JobConf createConfiguration(HiveConf hiveConf, boolean skipAMConf) throws conf.set(MRJobConfig.OUTPUT_KEY_CLASS, HiveKey.class.getName()); conf.set(MRJobConfig.OUTPUT_VALUE_CLASS, BytesWritable.class.getName()); - conf.set("mapred.partitioner.class", HiveConf.getVar(conf, HiveConf.ConfVars.HIVEPARTITIONER)); + conf.set("mapred.partitioner.class", HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_PARTITIONER)); conf.set("tez.runtime.partitioner.class", MRPartitioner.class.getName()); // Removing job credential entry/ cannot be set on the tasks @@ -1699,7 +1699,7 @@ public String createDagName(Configuration conf, QueryPlan plan) { } public static String getUserSpecifiedDagName(Configuration conf) { - String name = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYNAME); + String name = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_NAME); return (name != null) ? name : conf.get("mapred.job.name"); } @@ -1712,7 +1712,7 @@ public static String getUserSpecifiedDagName(Configuration conf) { * TODO This method is temporary. Ideally Hive should only need to pass to Tez the amount of memory * it requires to do the map join, and Tez should take care of figuring out how much to allocate * Adjust the percentage of memory to be reserved for the processor from Tez - * based on the actual requested memory by the Map Join, i.e. HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD + * based on the actual requested memory by the Map Join, i.e. 
HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD * @return the adjusted percentage */ static double adjustMemoryReserveFraction(long memoryRequested, HiveConf conf) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java index 023d4d371d6c..8df866a9ca78 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java @@ -79,7 +79,7 @@ public void init(ExecMapperContext context, MapredContext mrContext, Configurati this.hconf = hconf; this.desc = joinOp.getConf(); this.cacheKey = joinOp.getCacheKey(); - String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP); + String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); String vertexName = hconf.get(Operator.CONTEXT_NAME_KEY, ""); String counterName = Utilities.getVertexCounterName(HashTableLoaderCounters.HASHTABLE_LOAD_TIME_MS.name(), vertexName); this.htLoadCounter = tezContext.getTezProcessorContext().getCounters().findCounter(counterGroup, counterName); @@ -100,7 +100,7 @@ public void load(MapJoinTableContainer[] mapJoinTables, } boolean useOptimizedTables = HiveConf.getBoolVar( - hconf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE); + hconf, HiveConf.ConfVars.HIVE_MAPJOIN_USE_OPTIMIZED_TABLE); boolean useHybridGraceHashJoin = desc.isHybridHashJoin(); boolean isFirstKey = true; @@ -109,13 +109,13 @@ public void load(MapJoinTableContainer[] mapJoinTables, LOG.info("Memory manager allocates " + totalMapJoinMemory + " bytes for the loading hashtable."); if (totalMapJoinMemory <= 0) { totalMapJoinMemory = HiveConf.getLongVar( - hconf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); + hconf, HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD); } long processMaxMemory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax(); if (totalMapJoinMemory > processMaxMemory) { float hashtableMemoryUsage = HiveConf.getFloatVar( - hconf, HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE); + hconf, HiveConf.ConfVars.HIVE_HASHTABLE_FOLLOWBY_GBY_MAX_MEMORY_USAGE); LOG.warn("totalMapJoinMemory value of " + totalMapJoinMemory + " is greater than the max memory size of " + processMaxMemory); // Don't want to attempt to grab more memory than we have available .. 
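For context, the HashTableLoader hunks above only respell the map-join sizing constants; the accessor calls themselves are unchanged. A minimal sketch of that sizing pattern, assuming a Hadoop Configuration named hconf and a budget handed in by the Tez memory manager (both names are illustrative, and the final clamp to a heap fraction is an assumption based on the surrounding comment, not quoted from the patch):

    import java.lang.management.ManagementFactory;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;

    final class MapJoinMemorySketch {
      // Roughly mirrors HashTableLoader.load(): fall back to the no-conditional-task
      // threshold when no budget is given, then cap the table at a fraction of the heap.
      static long chooseMapJoinMemory(Configuration hconf, long managerBudget) {
        long total = managerBudget;
        if (total <= 0) {
          total = HiveConf.getLongVar(hconf,
              HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD);
        }
        long processMax = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
        if (total > processMax) {
          // Assumed clamp: scale down by the configured follow-by-group-by fraction.
          float fraction = HiveConf.getFloatVar(hconf,
              HiveConf.ConfVars.HIVE_HASHTABLE_FOLLOWBY_GBY_MAX_MEMORY_USAGE);
          total = (long) (processMax * fraction);
        }
        return total;
      }
    }

Only the enum spellings change in this patch; getLongVar and getFloatVar keep their existing signatures.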
percentage is a bit arbitrary @@ -153,8 +153,8 @@ public void load(MapJoinTableContainer[] mapJoinTables, int numPartitions = 0; try { numPartitions = HybridHashTableContainer.calcNumPartitions(memory, maxSize, - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE)); + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_NUM_PARTITIONS), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_WB_SIZE)); } catch (IOException e) { throw new HiveException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java index 2a68b20c27b8..46df46bbdf66 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java @@ -23,14 +23,11 @@ import java.util.Arrays; import java.util.BitSet; import java.util.Comparator; -import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Optional; import java.util.Set; -import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import org.apache.hadoop.fs.BlockLocation; @@ -191,7 +188,7 @@ public List initialize() throws Exception { int availableSlots = getAvailableSlotsCalculator().getAvailableSlots(); - if (HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1) <= 1) { + if (HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 1) <= 1) { // broken configuration from mapred-default.xml final long blockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT); @@ -199,7 +196,7 @@ public List initialize() throws Exception { TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE, TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE_DEFAULT); final long preferredSplitSize = Math.min(blockSize / 2, minGrouping); - HiveConf.setLongVar(jobConf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, preferredSplitSize); + HiveConf.setLongVar(jobConf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, preferredSplitSize); LOG.info("The preferred split size is " + preferredSplitSize); } @@ -216,7 +213,7 @@ public List initialize() throws Exception { InputSplit[] splits; if (generateSingleSplit && - conf.get(HiveConf.ConfVars.HIVETEZINPUTFORMAT.varname).equals(HiveInputFormat.class.getName())) { + conf.get(HiveConf.ConfVars.HIVE_TEZ_INPUT_FORMAT.varname).equals(HiveInputFormat.class.getName())) { MapWork mapWork = Utilities.getMapWork(jobConf); List paths = Utilities.getInputPathsTez(jobConf, mapWork); FileSystem fs = paths.get(0).getFileSystem(jobConf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java index a1593cc8e372..5a31f22b200a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java @@ -99,7 +99,7 @@ public class MapRecordProcessor extends RecordProcessor { public MapRecordProcessor(final JobConf jconf, final ProcessorContext context) throws Exception { super(jconf, context); - String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVE_QUERY_ID); if (LlapProxy.isDaemon()) { setLlapOfFragmentId(context); } diff 
--git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java index 2e87b3e44344..6c2fb4be7217 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java @@ -93,7 +93,7 @@ void init( .initialize(); } - String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVE_QUERY_ID); cache = ObjectCacheFactory.getCache(jconf, queryId, true); try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java index 39c098b13738..c46082988582 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java @@ -86,7 +86,7 @@ public class ReduceRecordProcessor extends RecordProcessor { public ReduceRecordProcessor(final JobConf jconf, final ProcessorContext context) throws Exception { super(jconf, context); - String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVE_QUERY_ID); cache = ObjectCacheFactory.getCache(jconf, queryId, true); dynamicValueCache = ObjectCacheFactory.getCache(jconf, queryId, false, true); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java index 0cf6bf0e313c..33d4210fb226 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java @@ -298,7 +298,7 @@ private void configureAmRegistry(SessionType session) { bySessionId.put(session.getSessionId(), session); HiveConf conf = session.getConf(); conf.set(ConfVars.LLAP_TASK_SCHEDULER_AM_REGISTRY_NAME.varname, amRegistryName); - conf.set(ConfVars.HIVESESSIONID.varname, session.getSessionId()); + conf.set(ConfVars.HIVE_SESSION_ID.varname, session.getSessionId()); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java index 7fbd1573ee77..c87c968b5c8f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.ql.exec.tez; -import org.apache.hadoop.hive.ql.exec.tez.TezSessionState.HiveResources; - import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -278,7 +276,7 @@ private TezSessionState getSession(HiveConf conf, boolean doOpen) throws Excepti // TODO Session re-use completely disabled for doAs=true. Always launches a new session. 
boolean nonDefaultUser = conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS); - boolean jobNameSet = !HiveConf.getVar(conf, ConfVars.HIVETEZJOBNAME).equals("HIVE-%s"); + boolean jobNameSet = !HiveConf.getVar(conf, ConfVars.HIVE_TEZ_JOB_NAME).equals("HIVE-%s"); /* * if the user has specified a queue name themselves or job name is set, we create a new diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java index 015e826d8e0f..c2a9ae5203b1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java @@ -363,7 +363,7 @@ protected void openInternal(String[] additionalFilesNotFromConf, */ HiveConfUtil.updateCredentialProviderPasswordForJobs(tezConfig); - String tezJobNameFormat = HiveConf.getVar(conf, ConfVars.HIVETEZJOBNAME); + String tezJobNameFormat = HiveConf.getVar(conf, ConfVars.HIVE_TEZ_JOB_NAME); final TezClient session = TezClient.newBuilder(String.format(tezJobNameFormat, sessionId), tezConfig) .setIsSession(true).setLocalResources(commonLocalResources) .setCredentials(llapCredentials).setServicePluginDescriptor(servicePluginsDescriptor) @@ -608,7 +608,7 @@ private void setupSessionAcls(Configuration tezConf, HiveConf hiveConf) throws String loginUser = loginUserUgi == null ? null : loginUserUgi.getShortUserName(); boolean addHs2User = - HiveConf.getBoolVar(hiveConf, ConfVars.HIVETEZHS2USERACCESS); + HiveConf.getBoolVar(hiveConf, ConfVars.HIVE_TEZ_HS2_USER_ACCESS); String viewStr = Utilities.getAclStringWithHiveModification(tezConf, TezConfiguration.TEZ_AM_VIEW_ACLS, addHs2User, user, loginUser); @@ -794,12 +794,12 @@ private Path createTezDir(String sessionId, String suffix) throws IOException { // tez needs its own scratch dir (per session) // TODO: De-link from SessionState. A TezSession can be linked to different Hive Sessions via the pool. SessionState sessionState = SessionState.get(); - String hdfsScratchDir = sessionState == null ? HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR) : sessionState + String hdfsScratchDir = sessionState == null ? HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR) : sessionState .getHdfsScratchDirURIString(); Path tezDir = new Path(hdfsScratchDir, TEZ_DIR); tezDir = new Path(tezDir, sessionId + ((suffix == null) ? "" : ("-" + suffix))); FileSystem fs = tezDir.getFileSystem(conf); - FsPermission fsPermission = new FsPermission(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION)); + FsPermission fsPermission = new FsPermission(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR_PERMISSION)); fs.mkdirs(tezDir, fsPermission); // Make sure the path is normalized (we expect validation to pass since we just created it). tezDir = DagUtils.validateTargetDir(tezDir, conf).getPath(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java index 32942ef98a71..48e907b20ab7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java @@ -148,7 +148,7 @@ public int execute() { Context ctx = null; Ref sessionRef = Ref.from(null); - final String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + final String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); try { // Get or create Context object. If we create it we have to clean it later as well. 
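For reference, TezTask and the record processors above only switch to the HIVE_QUERY_ID spelling; the lookup pattern itself is untouched. A hedged sketch of the query-id bootstrap that ExecDriver.main() in this patch performs, wrapped in a hypothetical class so it compiles on its own:

    import org.apache.hadoop.hive.conf.HiveConf;

    final class QueryIdBootstrapSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Read the id with an empty-string default, as ExecDriver.main() does.
        String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, "").trim();
        if (queryId.isEmpty()) {
          // No id was provided; synthesize one and write it back to the conf.
          queryId = "unknown-" + System.currentTimeMillis();
          HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, queryId);
        }
        // Publish the id as a system property so downstream components keyed on
        // HIVE_QUERY_ID (for example per-query logging) can pick it up.
        System.setProperty(HiveConf.ConfVars.HIVE_QUERY_ID.toString(), queryId);
        System.out.println("query id = " + queryId);
      }
    }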
@@ -397,7 +397,7 @@ private void collectCommitInformation(TezWork work) throws IOException, TezExcep private void updateNumRows() { if (counters != null) { TezCounter counter = counters.findCounter( - conf.getVar(HiveConf.ConfVars.HIVECOUNTERGROUP), FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN); + conf.getVar(HiveConf.ConfVars.HIVE_COUNTER_GROUP), FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN); if (counter != null) { queryState.setNumModifiedRows(counter.getValue()); } @@ -486,8 +486,8 @@ DAG build(JobConf conf, TezWork tezWork, Path scratchDir, Context ctx, .put("description", ctx.getCmd()); String dagInfo = json.toString(); - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); - dag.setConf(HiveConf.ConfVars.HIVEQUERYID.varname, queryId); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); + dag.setConf(HiveConf.ConfVars.HIVE_QUERY_ID.varname, queryId); LOG.debug("DagInfo: {}", dagInfo); @@ -596,7 +596,7 @@ private static void setAccessControlsForCurrentUser(DAG dag, String queryId, String loginUser = loginUserUgi == null ? null : loginUserUgi.getShortUserName(); boolean addHs2User = - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVETEZHS2USERACCESS); + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_TEZ_HS2_USER_ACCESS); // Temporarily re-using the TEZ AM View ACLs property for individual dag access control. // Hive may want to setup it's own parameters if it wants to control per dag access. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java index 33f9a8a34d26..2c71296772fd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.databind.MapperFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import com.google.common.annotations.VisibleForTesting; @@ -1568,7 +1567,7 @@ public WmTezSession getSession(TezSessionState session, MappingInput input, Hive WmEvent wmEvent = new WmEvent(WmEvent.EventType.GET); // Note: not actually used for pool sessions; verify some things like doAs are not set. 
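As an aside, the TezTask.updateNumRows() hunk above keeps the counter lookup intact and only renames HIVE_COUNTER_GROUP. A small sketch of that lookup, assuming a TezCounters instance already fetched from a completed DAG (the helper name and null handling are illustrative):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
    import org.apache.tez.common.counters.TezCounter;
    import org.apache.tez.common.counters.TezCounters;

    final class RowCounterSketch {
      // Reads the rows-written counter from the configured Hive counter group.
      static long rowsWritten(HiveConf conf, TezCounters counters) {
        String group = conf.getVar(HiveConf.ConfVars.HIVE_COUNTER_GROUP);
        TezCounter counter =
            counters.findCounter(group, FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN);
        // A missing counter just means no FileSink reported rows for this DAG.
        return counter == null ? 0L : counter.getValue();
      }
    }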
validateConfig(conf); - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); SettableFuture future = SettableFuture.create(); WmTezSession wmSession = checkSessionForReuse(session); GetRequest req = new GetRequest( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java index c039342d1ed8..045fb13e70be 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java @@ -28,7 +28,6 @@ import org.apache.tez.common.counters.TezCounter; import org.apache.tez.common.counters.TezCounters; import org.apache.tez.dag.api.DAG; -import org.apache.tez.dag.api.TezConfiguration; import org.apache.tez.dag.api.TezException; import org.apache.tez.dag.api.Vertex; import org.apache.tez.dag.api.client.DAGClient; @@ -73,7 +72,7 @@ class DAGSummary implements PrintSummary { this.dagClient = dagClient; this.dag = dag; this.perfLogger = perfLogger; - this.hiveCountersGroup = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVECOUNTERGROUP); + this.hiveCountersGroup = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); this.hiveCounters = hiveCounters(dagClient); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java index 9cfe9fbec155..47694f241540 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.ConstantVectorExpression; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.VectorDesc; @@ -84,7 +83,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { VectorExpression.doTransientInit(predicateExpression, hconf); try { heartbeatInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVESENDHEARTBEAT); + HiveConf.ConfVars.HIVE_SEND_HEARTBEAT); predicateExpression.init(hconf); } catch (Throwable e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java index c2dfaeb074eb..505db9e5e611 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java @@ -405,7 +405,7 @@ public void initialize(Configuration hconf) throws HiveException { this.maxHtEntries = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_VECTORIZATION_GROUPBY_MAXENTRIES); this.numRowsCompareHashAggr = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL); + HiveConf.ConfVars.HIVE_GROUPBY_MAP_INTERVAL); } else { this.percentEntriesToFlush = @@ -415,7 +415,7 @@ public void initialize(Configuration hconf) throws HiveException { this.maxHtEntries = HiveConf.ConfVars.HIVE_VECTORIZATION_GROUPBY_MAXENTRIES.defaultIntVal; this.numRowsCompareHashAggr = - HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL.defaultIntVal; + 
HiveConf.ConfVars.HIVE_GROUPBY_MAP_INTERVAL.defaultIntVal; } minReductionHashAggr = getConf().getMinReductionHashAggr(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java index 836c1477e4f7..c288feb8f9f9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java @@ -102,7 +102,7 @@ public void init(Configuration conf) { throw new IllegalStateException("Unsupported type " + colVectorType); } - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); runtimeCache = ObjectCacheFactory.getCache(conf, queryId, false, true); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java index 460f05b0e174..cfada606eb16 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionError; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException; import org.apache.hive.common.util.FixedSizedObjectPool; import org.apache.tez.common.counters.TezCounter; import org.slf4j.Logger; @@ -90,7 +89,7 @@ public VectorMapJoinFastHashTableLoader(TezContext context, Configuration hconf, this.desc = joinOp.getConf(); this.cacheKey = joinOp.getCacheKey(); this.htLoadCounter = this.tezContext.getTezProcessorContext().getCounters().findCounter( - HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP), hconf.get(Operator.CONTEXT_NAME_KEY, "")); + HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP), hconf.get(Operator.CONTEXT_NAME_KEY, "")); } @Override @@ -100,7 +99,7 @@ public void init(ExecMapperContext context, MapredContext mrContext, this.hconf = hconf; this.desc = joinOp.getConf(); this.cacheKey = joinOp.getCacheKey(); - String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP); + String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); String vertexName = hconf.get(Operator.CONTEXT_NAME_KEY, ""); String counterName = Utilities.getVertexCounterName(HashTableLoaderCounters.HASHTABLE_LOAD_TIME_MS.name(), vertexName); this.htLoadCounter = tezContext.getTezProcessorContext().getCounters().findCounter(counterGroup, counterName); @@ -111,7 +110,7 @@ private void initHTLoadingService(long estKeyCount) { // Avoid many small HTs that will rehash multiple times causing GCs this.numLoadThreads = 1; } else { - int initialValue = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEMAPJOINPARALELHASHTABLETHREADS); + int initialValue = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_MAPJOIN_PARALEL_HASHTABLE_THREADS); Preconditions.checkArgument(initialValue > 0, "The number of HT-loading-threads should be positive."); int adjustedValue = Integer.highestOneBit(initialValue); diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java index 1b281caf0ecd..20ae779f2215 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java @@ -62,9 +62,9 @@ public VectorMapJoinFastTableContainer(MapJoinDesc desc, Configuration hconf, this.desc = desc; this.hconf = hconf; - keyCountAdj = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT); - threshold = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD); - loadFactor = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR); + keyCountAdj = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT); + threshold = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_THRESHOLD); + loadFactor = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_LOAD_FACTOR); this.numHTs = numHTs; this.estimatedKeyCount = estimatedKeys > numHTs ? (estimatedKeys/ numHTs) : estimatedKeys; @@ -98,7 +98,7 @@ private VectorMapJoinFastHashTableContainerBase createHashTables(int newThreshol boolean isFullOuter = vectorDesc.getIsFullOuter(); boolean minMaxEnabled = vectorDesc.getMinMaxEnabled(); - int writeBufferSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE); + int writeBufferSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_WB_SIZE); VectorMapJoinFastHashTableContainerBase htWrapper = null; switch (hashTableKeyType) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java index 205a5464f1ed..5261a1beb1c5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java @@ -21,7 +21,6 @@ import java.io.File; import java.io.IOException; import java.io.PrintWriter; -import java.io.Serializable; import java.util.HashMap; import java.util.Map; import java.util.Random; @@ -80,7 +79,7 @@ public HiveHistoryImpl(SessionState ss) { try { console = new LogHelper(LOG); String conf_file_loc = ss.getConf().getVar( - HiveConf.ConfVars.HIVEHISTORYFILELOC); + HiveConf.ConfVars.HIVE_HISTORY_FILE_LOC); if ((conf_file_loc == null) || conf_file_loc.length() == 0) { console.printError("No history file location given"); return; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java index 74d6ac4ce017..c0e3bb30054c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java @@ -426,7 +426,7 @@ private HiveHookEventProtoPartialBuilder getPreHookEvent(HookContext hookContext plan.getOptimizedQueryString(), plan.getOptimizedCBOPlan()); return new HiveHookEventProtoPartialBuilder( - builder, explainWork, otherInfo, plan.getQueryStr(), conf.getVar(ConfVars.HIVESTAGEIDREARRANGE)); + builder, explainWork, otherInfo, plan.getQueryStr(), conf.getVar(ConfVars.HIVE_STAGE_ID_REARRANGE)); } private HiveHookEventProtoPartialBuilder getPostHookEvent(HookContext hookContext, boolean success) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java 
b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java index cd23b247063a..f3fc63ac3e4b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java @@ -49,11 +49,11 @@ public class HookContext { static public enum HookType { - PRE_EXEC_HOOK(HiveConf.ConfVars.PREEXECHOOKS, ExecuteWithHookContext.class, + PRE_EXEC_HOOK(HiveConf.ConfVars.PRE_EXEC_HOOKS, ExecuteWithHookContext.class, "Pre-execution hooks to be invoked for each statement"), - POST_EXEC_HOOK(HiveConf.ConfVars.POSTEXECHOOKS, ExecuteWithHookContext.class, + POST_EXEC_HOOK(HiveConf.ConfVars.POST_EXEC_HOOKS, ExecuteWithHookContext.class, "Post-execution hooks to be invoked for each statement"), - ON_FAILURE_HOOK(HiveConf.ConfVars.ONFAILUREHOOKS, ExecuteWithHookContext.class, + ON_FAILURE_HOOK(HiveConf.ConfVars.ON_FAILURE_HOOKS, ExecuteWithHookContext.class, "On-failure hooks to be invoked for each statement"), QUERY_LIFETIME_HOOKS(HiveConf.ConfVars.HIVE_QUERY_LIFETIME_HOOKS, QueryLifeTimeHook.class, "Hooks that will be triggered before/after query compilation and before/after query execution"), @@ -61,7 +61,7 @@ static public enum HookType { "Hooks that invoked before/after Hive performs its own semantic analysis on a statement"), DRIVER_RUN_HOOKS(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, HiveDriverRunHook.class, "Hooks that Will be run at the beginning and end of Driver.run"), - QUERY_REDACTOR_HOOKS(HiveConf.ConfVars.QUERYREDACTORHOOKS, Redactor.class, + QUERY_REDACTOR_HOOKS(HiveConf.ConfVars.QUERY_REDACTOR_HOOKS, Redactor.class, "Hooks to be invoked for each query which can transform the query before it's placed in the job.xml file"), // The HiveSessionHook.class cannot access, use Hook.class instead HIVE_SERVER2_SESSION_HOOK(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK, Hook.class, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java index 102b2b517312..7633ac85612f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hive.ql.exec.tez.CompileTimeCounters; import org.apache.hadoop.hive.ql.exec.tez.HiveInputCounters; import org.apache.tez.common.counters.FileSystemCounter; -import org.apache.tez.dag.api.client.DAGClient; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.conf.HiveConf; @@ -62,7 +61,7 @@ public void run(HookContext hookContext) throws Exception { LOG.info("Printing summary for tez task: " + tezTask.getName()); TezCounters counters = tezTask.getTezCounters(); if (counters != null) { - String hiveCountersGroup = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP); + String hiveCountersGroup = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); for (CounterGroup group : counters) { if (hiveCountersGroup.equals(group.getDisplayName())) { console.printInfo(tezTask.getId() + " HIVE COUNTERS:", false); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index 987411401e4f..ab90142162cf 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -553,7 +553,7 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job } else { // if 
the input is Compressed OR not text we have no way of splitting them! // In that case RecordReader should take care of header/footer skipping! - HiveConf.setLongVar(conf, ConfVars.MAPREDMINSPLITSIZE, Long.MAX_VALUE); + HiveConf.setLongVar(conf, ConfVars.MAPRED_MIN_SPLIT_SIZE, Long.MAX_VALUE); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java index 6eba2e5b71a6..90bd7339a71d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java @@ -1057,7 +1057,7 @@ void init(Configuration conf, FSDataOutputStream out, this.out = out; this.codec = codec; this.metadata = metadata; - this.useNewMagic = conf.getBoolean(HIVEUSEEXPLICITRCFILEHEADER.varname, true); + this.useNewMagic = conf.getBoolean(HIVE_USE_EXPLICIT_RCFILE_HEADER.varname, true); } /** Returns the compression codec of data in this file. */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java index 6d93de0b29c4..d6ae00bfa8d4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java @@ -102,7 +102,7 @@ public RCFileRecordReader(Configuration conf, FileSplit split) this.conf = conf; this.split = split; - useCache = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEUSERCFILESYNCCACHE); + useCache = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_USE_RCFILE_SYNC_CACHE); if (split.getStart() > in.getPosition()) { long oldSync = useCache ? syncCache.get(split) : -1; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java index 7d808c25d254..dac1393976d6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java @@ -118,7 +118,7 @@ public int execute() { String jobName = null; if (noName && this.getQueryPlan() != null) { - int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); + int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVE_JOBNAME_LENGTH); jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6); } @@ -137,9 +137,9 @@ public int execute() { Utilities.setMapWork(job, work, ctx.getMRTmpPath(), true); // remove pwd from conf file so that job tracker doesn't show this logs - String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD); + String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTORE_PWD); if (pwd != null) { - HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE"); + HiveConf.setVar(job, HiveConf.ConfVars.METASTORE_PWD, "HIVE"); } // submit the job diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java index c19c49f266bc..bae96b1b67bc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId; -import org.apache.orc.OrcProto; import org.apache.orc.impl.OrcTail; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -98,7 +97,7 @@ public boolean hasPpd() { public void configure(HiveConf queryConfig) { 
this.conf = queryConfig; this.sarg = ConvertAstToSearchArg.createFromConf(conf); - this.isPpdEnabled = HiveConf.getBoolVar(conf, ConfVars.HIVEOPTINDEXFILTER) + this.isPpdEnabled = HiveConf.getBoolVar(conf, ConfVars.HIVE_OPT_INDEX_FILTER) && HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_MS_FOOTER_CACHE_PPD); this.isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST); this.sargIsOriginal = this.sargNotIsOriginal = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index 730ede4e5a02..564836144f0b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.io.orc; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hive.common.BlobStorageUtils; import org.apache.hadoop.hive.common.NoDynamicValuesException; @@ -333,7 +332,7 @@ public static int getRootColumn(boolean isOriginal) { public static void raiseAcidTablesMustBeReadWithAcidReaderException(Configuration conf) throws IOException { - String hiveInputFormat = HiveConf.getVar(conf, ConfVars.HIVEINPUTFORMAT); + String hiveInputFormat = HiveConf.getVar(conf, ConfVars.HIVE_INPUT_FORMAT); if (hiveInputFormat.equals(HiveInputFormat.class.getName())) { throw new IOException(ErrorMsg.ACID_TABLES_MUST_BE_READ_WITH_ACID_READER.getErrorCodedMsg()); } else { @@ -717,8 +716,8 @@ static class Context { this.isVectorMode = Utilities.getIsVectorized(conf); this.forceThreadpool = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST); this.sarg = ConvertAstToSearchArg.createFromConf(conf); - minSize = HiveConf.getLongVar(conf, ConfVars.MAPREDMINSPLITSIZE, DEFAULT_MIN_SPLIT_SIZE); - maxSize = HiveConf.getLongVar(conf, ConfVars.MAPREDMAXSPLITSIZE, DEFAULT_MAX_SPLIT_SIZE); + minSize = HiveConf.getLongVar(conf, ConfVars.MAPRED_MIN_SPLIT_SIZE, DEFAULT_MIN_SPLIT_SIZE); + maxSize = HiveConf.getLongVar(conf, ConfVars.MAPRED_MAX_SPLIT_SIZE, DEFAULT_MAX_SPLIT_SIZE); String ss = conf.get(ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname); if (ss == null || ss.equals(SplitStrategyKind.HYBRID.name())) { splitStrategyKind = SplitStrategyKind.HYBRID; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java index cf73c9551ebd..3710ee71c7c9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java @@ -359,7 +359,7 @@ private static TypeDescription getTypeDescriptionFromTableProperties(Properties writerOptions.getConfiguration().set(OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.getAttribute(), "-1.0"); } } - if(!HiveConf.getBoolVar(options.getConfiguration(), HiveConf.ConfVars.HIVETESTMODEACIDKEYIDXSKIP)) { + if(!HiveConf.getBoolVar(options.getConfiguration(), HiveConf.ConfVars.HIVE_TEST_MODE_ACID_KEY_IDX_SKIP)) { writerOptions.fileSystem(fs).callback(indexBuilder); } rowInspector = (StructObjectInspector)options.getInspector(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java index fe98dd0aee4e..43a47a95bb5b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java @@ -78,7 +78,7 @@ public boolean requireLock() { */ @Override public int execute() { - HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, + HiveConf.setVar(job, HiveConf.ConfVars.HIVE_INPUT_FORMAT, BucketizedHiveInputFormat.class.getName()); success = true; HiveFileFormatUtils.prepareJobOutput(job); @@ -105,16 +105,16 @@ public int execute() { job.setBoolean(MRJobConfig.MAP_SPECULATIVE, false); if (work.getMinSplitSize() != null) { - HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work + HiveConf.setLongVar(job, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, work .getMinSplitSize().longValue()); } if (work.getInputformat() != null) { - HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, work + HiveConf.setVar(job, HiveConf.ConfVars.HIVE_INPUT_FORMAT, work .getInputformat()); } - String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT); + String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVE_INPUT_FORMAT); LOG.info("Using " + inpFormat); try { @@ -146,7 +146,7 @@ public int execute() { String jobName = null; if (noName && this.getQueryPlan() != null) { - int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); + int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVE_JOBNAME_LENGTH); jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6); } @@ -166,9 +166,9 @@ public int execute() { // remove the pwd from conf file so that job tracker doesn't show this // logs - String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD); + String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTORE_PWD); if (pwd != null) { - HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE"); + HiveConf.setVar(job, HiveConf.ConfVars.METASTORE_PWD, "HIVE"); } JobClient jc = new JobClient(job); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index b60570b1ec73..c4013fc2c789 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -268,7 +268,7 @@ long openTxn(Context ctx, String user, TxnType txnType, long delay) throws LockE shouldReallocateWriteIds = false; isExplicitTransaction = false; startTransactionCount = 0; - this.queryId = ctx.getConf().get(HiveConf.ConfVars.HIVEQUERYID.varname); + this.queryId = ctx.getConf().get(HiveConf.ConfVars.HIVE_QUERY_ID.varname); LOG.info("Opened " + JavaUtils.txnIdToString(txnId)); ctx.setHeartbeater(startHeartbeat(delay)); return txnId; @@ -727,7 +727,7 @@ private Heartbeater startHeartbeat(long initialDelay) throws LockException { private ScheduledFuture startHeartbeat(long initialDelay, long heartbeatInterval, Runnable heartbeater) { // For negative testing purpose.. - if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER)) { + if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_HEARTBEATER)) { initialDelay = 0; } else if (initialDelay == 0) { /*make initialDelay a random number in [0, 0.75*heartbeatInterval] so that if a lot @@ -1120,8 +1120,8 @@ public LockException getLockException() { public void run() { try { // For negative testing purpose.. 
- if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER)) { - throw new LockException(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER.name() + "=true"); + if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_HEARTBEATER)) { + throw new LockException(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_HEARTBEATER.name() + "=true"); } LOG.debug("Heartbeating...for currentUser: " + currentUser); currentUser.doAs((PrivilegedExceptionAction) () -> { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 192fa13ffa1f..b3ef272bbfe5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -3371,8 +3371,8 @@ public Map, Partition> loadDynamicPartitions(final LoadTable LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); executor.shutdownNow(); } - if (HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) && HiveConf.getBoolVar(conf, ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION)) { - throw new HiveException(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION.name() + "=true"); + if (HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) && HiveConf.getBoolVar(conf, ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION)) { + throw new HiveException(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION.name() + "=true"); } try { if (isTxnTable) { @@ -3680,7 +3680,7 @@ public Partition getPartition(Table tbl, Map partSpec, for (FieldSchema field : tbl.getPartCols()) { String val = partSpec.get(field.getName()); // enable dynamic partitioning - if ((val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) + if ((val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING)) || (val != null && val.length() == 0)) { throw new HiveException("get partition: Value for key " + field.getName() + " is null or empty"); @@ -4076,7 +4076,7 @@ public List getPartitionNames(Table tbl, ExprNodeGenericFuncDesc expr, S exprBytes = SerializationUtilities.serializeObjectWithTypeInformation(expr); } try { - String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULT_PARTITION_NAME); PartitionsByExprRequest req = new PartitionsByExprRequest(tbl.getDbName(), tbl.getTableName(), ByteBuffer.wrap(exprBytes)); if (defaultPartitionName != null) { @@ -4523,7 +4523,7 @@ public boolean getPartitionsByExpr(Table tbl, ExprNodeDesc expr, HiveConf conf, perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_BY_EXPR); try { Preconditions.checkNotNull(partitions); - String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULT_PARTITION_NAME); if (tbl.getStorageHandler() != null && tbl.getStorageHandler().alwaysUnpartitioned()) { partitions.addAll(tbl.getStorageHandler().getPartitionsByExpr(tbl, expr)); return false; @@ -5700,7 +5700,7 @@ private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, public void cleanUpOneDirectoryForReplace(Path path, FileSystem fs, PathFilter pathFilter, HiveConf conf, boolean purge, boolean isNeedRecycle) throws IOException, HiveException { - if (isNeedRecycle && conf.getBoolVar(HiveConf.ConfVars.REPLCMENABLED)) { + if (isNeedRecycle && 
conf.getBoolVar(HiveConf.ConfVars.REPL_CM_ENABLED)) { recycleDirToCmPath(path, purge); } if (!fs.exists(path)) { @@ -5910,7 +5910,7 @@ public synchronized IMetaStoreClient getMSC( } throw ex; } - String metaStoreUris = conf.getVar(HiveConf.ConfVars.METASTOREURIS); + String metaStoreUris = conf.getVar(HiveConf.ConfVars.METASTORE_URIS); if (!org.apache.commons.lang3.StringUtils.isEmpty(metaStoreUris)) { // get a synchronized wrapper if the meta store is remote. metaStoreClient = HiveMetaStoreClient.newSynchronizedClient(metaStoreClient); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java index 625dbaeaf3eb..015895da9669 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java @@ -62,7 +62,6 @@ import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; import org.apache.hadoop.hive.metastore.api.UniqueConstraintsResponse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator; @@ -520,7 +519,7 @@ private boolean isCacheEnabledAndInitialized() { protected String getQueryId() { try { - return Hive.get().getConf().get(HiveConf.ConfVars.HIVEQUERYID.varname); + return Hive.get().getConf().get(HiveConf.ConfVars.HIVE_QUERY_ID.varname); } catch (HiveException e) { LOG.error("Error getting query id. Query level and Global HMS caching will be disabled", e); return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 3bb93ed60d1c..31efa27abec1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -1446,7 +1446,7 @@ public List dropPartitions(String catName, String dbName, String tblN List result = new ArrayList<>(); for (Pair pair : partExprs) { byte[] expr = pair.getRight(); - String filter = generateJDOFilter(table, expr, conf.get(HiveConf.ConfVars.DEFAULTPARTITIONNAME.varname)); + String filter = generateJDOFilter(table, expr, conf.get(HiveConf.ConfVars.DEFAULT_PARTITION_NAME.varname)); List partitions = tt.listPartitionsByFilter(filter); for (Partition p : partitions) { Partition droppedPartition = tt.dropPartition(p.getValues()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java index 8ffd41c49f94..e5109b34ef7e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java @@ -112,7 +112,7 @@ public static List getRegistry(Configuration conf) { ArrayList l = new ArrayList(); l.add(BLOCKOFFSET); l.add(FILENAME); - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEROWOFFSET)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ROW_OFFSET)) { l.add(ROWOFFSET); } l.add(ROWID); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java index 5d4e6cd8c8ce..bab07f179b1c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java @@ -49,7 +49,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // Throw an error if the user asked for bucketed mapjoin to be enforced and // bucketed mapjoin cannot be performed - if (!convert && conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETMAPJOIN)) { + if (!convert && conf.getBoolVar(HiveConf.ConfVars.HIVE_ENFORCE_BUCKET_MAPJOIN)) { throw new SemanticException(ErrorMsg.BUCKET_MAPJOIN_NOT_POSSIBLE.getMsg()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java index 98a80ab603ee..87b3820f6735 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java @@ -113,7 +113,7 @@ public class ConvertJoinMapJoin implements SemanticNodeProcessor { OptimizeTezProcContext context = (OptimizeTezProcContext) procCtx; - hashTableLoadFactor = context.conf.getFloatVar(ConfVars.HIVEHASHTABLELOADFACTOR); + hashTableLoadFactor = context.conf.getFloatVar(ConfVars.HIVE_HASHTABLE_LOAD_FACTOR); fastHashTableAvailable = context.conf.getBoolVar(ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED); JoinOperator joinOp = (JoinOperator) nd; @@ -133,7 +133,7 @@ public class ConvertJoinMapJoin implements SemanticNodeProcessor { TezBucketJoinProcCtx tezBucketJoinProcCtx = new TezBucketJoinProcCtx(context.conf); - boolean hiveConvertJoin = context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN) & + boolean hiveConvertJoin = context.conf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN) & !context.parseContext.getDisableMapJoin(); if (!hiveConvertJoin) { // we are just converting to a common merge join operator. 
The shuffle @@ -251,7 +251,7 @@ private boolean selectJoinForLlap(OptimizeTezProcContext context, JoinOperator j TezBucketJoinProcCtx tezBucketJoinProcCtx, LlapClusterStateForCompile llapInfo, MapJoinConversion mapJoinConversion, int numBuckets) throws SemanticException { - if (!context.conf.getBoolVar(HiveConf.ConfVars.HIVEDYNAMICPARTITIONHASHJOIN) + if (!context.conf.getBoolVar(HiveConf.ConfVars.HIVE_DYNAMIC_PARTITION_HASHJOIN) && numBuckets > 1) { // DPHJ is disabled, only attempt BMJ or mapjoin return convertJoinBucketMapJoin(joinOp, context, mapJoinConversion, tezBucketJoinProcCtx); @@ -406,7 +406,7 @@ private static long hashTableDataSizeAdjustment(long numRows, List - HiveConf.getIntVar(context.conf, HiveConf.ConfVars.XPRODSMALLTABLEROWSTHRESHOLD)) { + HiveConf.getIntVar(context.conf, HiveConf.ConfVars.XPROD_SMALL_TABLE_ROWS_THRESHOLD)) { // if any of smaller side is estimated to generate more than // threshold rows we would disable mapjoin return null; @@ -1328,7 +1328,7 @@ public MapJoinOperator convertJoinMapJoin(JoinOperator joinOp, OptimizeTezProcCo } MapJoinDesc mapJoinDesc = mapJoinOp.getConf(); mapJoinDesc.setHybridHashJoin(HiveConf.getBoolVar(context.conf, - HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN)); + HiveConf.ConfVars.HIVE_USE_HYBRIDGRACE_HASHJOIN)); List joinExprs = mapJoinDesc.getKeys().values().iterator().next(); if (joinExprs.size() == 0) { // In case of cross join, we disable hybrid grace hash join mapJoinDesc.setHybridHashJoin(false); @@ -1585,8 +1585,8 @@ private boolean convertJoinDynamicPartitionedHashJoin(JoinOperator joinOp, Optim private void fallbackToReduceSideJoin(JoinOperator joinOp, OptimizeTezProcContext context) throws SemanticException { - if (context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN) && - context.conf.getBoolVar(HiveConf.ConfVars.HIVEDYNAMICPARTITIONHASHJOIN)) { + if (context.conf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN) && + context.conf.getBoolVar(HiveConf.ConfVars.HIVE_DYNAMIC_PARTITION_HASHJOIN)) { if (convertJoinDynamicPartitionedHashJoin(joinOp, context)) { return; } @@ -1617,7 +1617,7 @@ private void fallbackToMergeJoin(JoinOperator joinOp, OptimizeTezProcContext con private boolean checkNumberOfEntriesForHashTable(JoinOperator joinOp, int position, OptimizeTezProcContext context) { long max = HiveConf.getLongVar(context.parseContext.getConf(), - HiveConf.ConfVars.HIVECONVERTJOINMAXENTRIESHASHTABLE); + HiveConf.ConfVars.HIVE_CONVERT_JOIN_MAX_ENTRIES_HASHTABLE); if (max < 1) { // Max is disabled, we can safely return true return true; @@ -1652,7 +1652,7 @@ private boolean checkNumberOfEntriesForHashTable(JoinOperator joinOp, int positi private boolean checkShuffleSizeForLargeTable(JoinOperator joinOp, int position, OptimizeTezProcContext context) { long max = HiveConf.getLongVar(context.parseContext.getConf(), - HiveConf.ConfVars.HIVECONVERTJOINMAXSHUFFLESIZE); + HiveConf.ConfVars.HIVE_CONVERT_JOIN_MAX_SHUFFLE_SIZE); if (max < 1) { // Max is disabled, we can safely return false return false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java index 32edacba7c3e..29e421136129 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java @@ -180,7 +180,7 @@ protected int checkCountDistinct(GroupByOperator mGby, ReduceSinkOperator rs, return -1; } // check if it is potential to trigger 
nullscan - if (pGraphContext.getConf().getBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES)) { + if (pGraphContext.getConf().getBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES)) { for (TableScanOperator tsOp : pGraphContext.getTopOps().values()) { List colIDs = tsOp.getNeededColumnIDs(); TableScanDesc desc = tsOp.getConf(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java index 85a420df8883..bc4bff81d6b8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java @@ -492,16 +492,16 @@ private void generateEventOperatorPlan(DynamicListContext ctx, ParseContext pars // do a group by on the list to dedup float groupByMemoryUsage = - HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf.getFloatVar(parseContext.getConf(), - HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf.getFloatVar(parseContext.getConf(), - ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf.getFloatVar(parseContext.getConf(), - ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); List groupByExprs = new ArrayList(); ExprNodeDesc groupByExpr = @@ -613,16 +613,16 @@ private boolean generateSemiJoinOperatorPlan(DynamicListContext ctx, ParseContex // do a group by to aggregate min,max and bloom filter. 
float groupByMemoryUsage = - HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf.getFloatVar(parseContext.getConf(), - HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf.getFloatVar(parseContext.getConf(), - ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf.getFloatVar(parseContext.getConf(), - ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); // Add min/max and bloom filter aggregations List aggFnOIs = new ArrayList(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 94dc5a5c26ec..175f868bb82d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -544,7 +544,7 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set fileSinkOp = OperatorFactory.get( parent.getCompilationOpContext(), desc, parent.getSchema()); @@ -1251,9 +1251,9 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, + " into " + finalName); } - boolean isBlockMerge = (conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL) && + boolean isBlockMerge = (conf.getBoolVar(ConfVars.HIVE_MERGE_RCFILE_BLOCK_LEVEL) && fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) || - (conf.getBoolVar(ConfVars.HIVEMERGEORCFILESTRIPELEVEL) && + (conf.getBoolVar(ConfVars.HIVE_MERGE_ORC_FILE_STRIPE_LEVEL) && fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class)); RowSchema inputRS = fsInput.getSchema(); @@ -1268,7 +1268,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, // Create a FileSink operator TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone(); Path mergeDest = srcMmWriteId == null ? finalName : finalName.getParent(); - fsOutputDesc = new FileSinkDesc(mergeDest, ts, conf.getBoolVar(ConfVars.COMPRESSRESULT)); + fsOutputDesc = new FileSinkDesc(mergeDest, ts, conf.getBoolVar(ConfVars.COMPRESS_RESULT)); fsOutputDesc.setMmWriteId(srcMmWriteId); fsOutputDesc.setIsMerge(true); // Create and attach the filesink for the merge. 
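
For reference, this change (like every hunk in the patch) renames only the Java enum constants in HiveConf.ConfVars; the configuration keys returned by ConfVars.varname (for example hive.query.id and hive.merge.tezfiles) stay the same, so hive-site.xml entries and SET commands are unaffected. A minimal sketch of a post-rename lookup, assuming the renamed constants introduced in this patch; the wrapper class and the two variables chosen here are illustrative only, not code from the patch:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class ConfVarRenameSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Only the enum constant is renamed; the key is still expected to be "hive.query.id".
        String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID);
        // Same pattern for boolean settings, e.g. the key behind HIVE_MERGE_TEZ_FILES.
        boolean mergeTezFiles = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MERGE_TEZ_FILES);
        System.out.println(queryId + " / merge tez files: " + mergeTezFiles);
      }
    }
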
@@ -1316,7 +1316,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, cplan = GenMapRedUtils.createMergeTask(fsInputDesc, finalName, dpCtx != null && dpCtx.getNumDPCols() > 0, fsInput.getCompilationOpContext()); if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { - work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf); + work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID), conf); cplan.setName("File Merge"); ((TezWork) work).add(cplan); } else { @@ -1325,7 +1325,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, } else { cplan = createMRWorkForMergingFiles(conf, tsMerge, fsInputDesc); if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { - work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf); + work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID), conf); cplan.setName("File Merge"); ((TezWork)work).add(cplan); } else { @@ -1910,7 +1910,7 @@ public static boolean isMergeRequired(List> mvTasks, HiveConf hco if (currTask.getWork() instanceof TezWork) { // tez blurs the boundary between map and reduce, thus it has it's own config - return hconf.getBoolVar(ConfVars.HIVEMERGETEZFILES); + return hconf.getBoolVar(ConfVars.HIVE_MERGE_TEZ_FILES); } return isMergeRequiredForMr(hconf, fsOp, currTask); } @@ -1918,12 +1918,12 @@ public static boolean isMergeRequired(List> mvTasks, HiveConf hco private static boolean isMergeRequiredForMr(HiveConf hconf, FileSinkOperator fsOp, Task currTask) { if (fsOp.getConf().isLinkedFileSink()) { - // If the user has HIVEMERGEMAPREDFILES set to false, the idea was the + // If the user has HIVE_MERGE_MAPRED_FILES set to false, the idea was the // number of reducers are few, so the number of files anyway are small. // However, with this optimization, we are increasing the number of files // possibly by a big margin. So, merge aggressively. 
- return (hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) || - hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES)); + return (hconf.getBoolVar(ConfVars.HIVE_MERGE_MAPFILES) || + hconf.getBoolVar(ConfVars.HIVE_MERGE_MAPRED_FILES)); } // There are separate configuration parameters to control whether to // merge for a map-only job @@ -1931,9 +1931,9 @@ private static boolean isMergeRequiredForMr(HiveConf hconf, if (currTask.getWork() instanceof MapredWork) { ReduceWork reduceWork = ((MapredWork) currTask.getWork()).getReduceWork(); boolean mergeMapOnly = - hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) && reduceWork == null; + hconf.getBoolVar(ConfVars.HIVE_MERGE_MAPFILES) && reduceWork == null; boolean mergeMapRed = - hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES) && + hconf.getBoolVar(ConfVars.HIVE_MERGE_MAPRED_FILES) && reduceWork != null; if (mergeMapOnly || mergeMapRed) { return true; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java index f52d5652b608..ec2a6ccb818c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java @@ -83,7 +83,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { Map opRules = new LinkedHashMap(); HiveConf conf = pctx.getConf(); - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { // process group-by pattern opRules.put(new RuleRegExp("R1", GroupByOperator.getOperatorName() + "%" + @@ -188,7 +188,7 @@ protected void processGroupBy(GroupByOptimizerContext ctx, if (!groupByOpDesc.isDistinct()) { removeReduceSink = true; } - else if (!HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + else if (!HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { // Optimize the query: select count(distinct keys) from T, where // T is bucketized and sorted by T // Partial aggregation can be done by the mappers in this scenario diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java index 4cae3b26a3fd..8b71ea65f283 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java @@ -251,10 +251,10 @@ private static class LimitPushdownContext implements NodeProcessorCtx { private final float threshold; public LimitPushdownContext(HiveConf conf) throws SemanticException { - threshold = conf.getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE); + threshold = conf.getFloatVar(HiveConf.ConfVars.HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE); if (threshold <= 0 || threshold >= 1) { throw new SemanticException("Invalid memory usage value " + threshold + - " for " + HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE); + " for " + HiveConf.ConfVars.HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java index adf4fbe1b216..839db8f9bbc6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java @@ -413,7 +413,7 @@ private static boolean checkFullOuterMapJoinCompatible(HiveConf hiveConf, boolean 
isVectorizationMapJoinNativeEnabled = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED); boolean isHybridHashJoin = HiveConf.getBoolVar(hiveConf, - HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN); + HiveConf.ConfVars.HIVE_USE_HYBRIDGRACE_HASHJOIN); if (isVectorizationMapJoinNativeEnabled && isHybridHashJoin) { LOG.debug("FULL OUTER MapJoin not enabled: Native Vector MapJoin and Hybrid Grace not supported"); return false; @@ -469,12 +469,12 @@ public static boolean isFullOuterMapEnabled(HiveConf hiveConf, JoinOperator join final boolean isEnabled = HiveConf.getBoolVar( hiveConf, - HiveConf.ConfVars.HIVEMAPJOINFULLOUTER); + HiveConf.ConfVars.HIVE_MAPJOIN_FULL_OUTER); switch (mapJoinFullOuterOverride) { case NONE: { if (!isEnabled) { - LOG.debug("FULL OUTER MapJoin not enabled: {} is false", HiveConf.ConfVars.HIVEMAPJOINFULLOUTER.varname); + LOG.debug("FULL OUTER MapJoin not enabled: {} is false", HiveConf.ConfVars.HIVE_MAPJOIN_FULL_OUTER.varname); return false; } } @@ -483,18 +483,18 @@ public static boolean isFullOuterMapEnabled(HiveConf hiveConf, JoinOperator join if (LOG.isDebugEnabled()) { LOG.debug("FULL OUTER MapJoin not enabled: " + HiveConf.ConfVars.HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE.varname + " is disable (" + - " " + HiveConf.ConfVars.HIVEMAPJOINFULLOUTER.varname + " is " + isEnabled + ")"); + " " + HiveConf.ConfVars.HIVE_MAPJOIN_FULL_OUTER.varname + " is " + isEnabled + ")"); } return false; case ENABLE: // Different parts of the code may rely on this being set... HiveConf.setBoolVar(hiveConf, - HiveConf.ConfVars.HIVEMAPJOINFULLOUTER, true); + HiveConf.ConfVars.HIVE_MAPJOIN_FULL_OUTER, true); if (LOG.isDebugEnabled()) { LOG.debug("FULL OUTER MapJoin is enabled: " + HiveConf.ConfVars.HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE.varname + " is enable (" + - " " + HiveConf.ConfVars.HIVEMAPJOINFULLOUTER.varname + " is " + isEnabled + ")"); + " " + HiveConf.ConfVars.HIVE_MAPJOIN_FULL_OUTER.varname + " is " + isEnabled + ")"); } break; default: @@ -520,9 +520,9 @@ public static boolean isFullOuterMapEnabled(HiveConf hiveConf, JoinOperator join final boolean isOptimizedHashTableEnabled = HiveConf.getBoolVar( hiveConf, - HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE); + HiveConf.ConfVars.HIVE_MAPJOIN_USE_OPTIMIZED_TABLE); if (!isOptimizedHashTableEnabled) { - LOG.debug("FULL OUTER MapJoin not enabled: {} is false", HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE.varname); + LOG.debug("FULL OUTER MapJoin not enabled: {} is false", HiveConf.ConfVars.HIVE_MAPJOIN_USE_OPTIMIZED_TABLE.varname); return false; } @@ -652,8 +652,8 @@ public MapJoinOperator generateMapJoinOperator(ParseContext pctx, JoinOperator o int mapJoinPos) throws SemanticException { HiveConf hiveConf = pctx.getConf(); boolean noCheckOuterJoin = HiveConf.getBoolVar(hiveConf, - HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN) - && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN); + HiveConf.ConfVars.HIVE_OPT_SORT_MERGE_BUCKET_MAPJOIN) + && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_BUCKET_MAPJOIN); MapJoinOperator mapJoinOp = convertMapJoin(pctx.getConf(), op, op.getConf().isLeftInputJoin(), op.getConf().getBaseSrc(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java index 41bdf77f5fbb..3341be88fd10 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java @@ -74,7 +74,7 @@ public void 
initialize(HiveConf hiveConf) { // Add the transformation that computes the lineage information. Set postExecHooks = Sets.newHashSet( Splitter.on(",").trimResults().omitEmptyStrings().split( - Strings.nullToEmpty(HiveConf.getVar(hiveConf, HiveConf.ConfVars.POSTEXECHOOKS)))); + Strings.nullToEmpty(HiveConf.getVar(hiveConf, HiveConf.ConfVars.POST_EXEC_HOOKS)))); if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_LINEAGE_INFO) || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter") || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger") @@ -83,33 +83,33 @@ public void initialize(HiveConf hiveConf) { } // Try to transform OR predicates in Filter into simpler IN clauses first - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER) && + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_POINT_LOOKUP_OPTIMIZER) && !pctx.getContext().isCboSucceeded()) { final int min = HiveConf.getIntVar(hiveConf, - HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN); + HiveConf.ConfVars.HIVE_POINT_LOOKUP_OPTIMIZER_MIN); transformations.add(new PointLookupOptimizer(min)); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEPARTITIONCOLUMNSEPARATOR)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_PARTITION_COLUMN_SEPARATOR)) { transformations.add(new PartitionColumnsSeparator()); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD) && + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_PPD) && !pctx.getContext().isCboSucceeded()) { transformations.add(new PredicateTransitivePropagate()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_CONSTANT_PROPAGATION)) { transformations.add(new ConstantPropagate()); } transformations.add(new SyntheticJoinPredicate()); transformations.add(new PredicatePushDown()); - } else if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD) && + } else if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_PPD) && pctx.getContext().isCboSucceeded()) { transformations.add(new SyntheticJoinPredicate()); transformations.add(new SimplePredicatePushDown()); transformations.add(new RedundantDynamicPruningConditionsRemoval()); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION) && + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_CONSTANT_PROPAGATION) && (!pctx.getContext().isCboSucceeded() || pctx.getContext().getOperation() == Context.Operation.MERGE)) { // We run constant propagation twice because after predicate pushdown, filter expressions // are combined and may become eligible for reduction (like is not null filter). @@ -121,26 +121,26 @@ public void initialize(HiveConf hiveConf) { transformations.add(new SortedDynPartitionTimeGranularityOptimizer()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_PPD)) { transformations.add(new PartitionPruner()); transformations.add(new PartitionConditionRemover()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTLISTBUCKETING)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_LIST_BUCKETING)) { /* Add list bucketing pruner. 
*/ transformations.add(new ListBucketingPruner()); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION) && + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_CONSTANT_PROPAGATION) && !pctx.getContext().isCboSucceeded()) { // PartitionPruner may create more folding opportunities, run ConstantPropagate again. transformations.add(new ConstantPropagate()); } } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTGROUPBY) || + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_GROUPBY) || HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_MAP_GROUPBY_SORT)) { transformations.add(new GroupByOptimizer()); } transformations.add(new ColumnPruner()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVECOUNTDISTINCTOPTIMIZER) + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_COUNT_DISTINCT_OPTIMIZER) && (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_IN_TEST) || isTezExecEngine)) { transformations.add(new CountDistinctRewriteProc()); } @@ -156,7 +156,7 @@ public void initialize(HiveConf hiveConf) { MapJoinProcessor mapJoinProcessor = new MapJoinProcessor(); transformations.add(mapJoinProcessor); - if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN)) + if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_BUCKET_MAPJOIN)) && !isTezExecEngine) { transformations.add(new BucketMapJoinOptimizer()); bucketMapJoinOptimizer = true; @@ -164,7 +164,7 @@ public void initialize(HiveConf hiveConf) { // If optimize hive.optimize.bucketmapjoin.sortedmerge is set, add both // BucketMapJoinOptimizer and SortedMergeBucketMapJoinOptimizer - if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN)) + if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_SORT_MERGE_BUCKET_MAPJOIN)) && !isTezExecEngine) { if (!bucketMapJoinOptimizer) { // No need to add BucketMapJoinOptimizer twice @@ -173,20 +173,20 @@ public void initialize(HiveConf hiveConf) { transformations.add(new SortedMergeBucketMapJoinOptimizer()); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEBUCKETINGSORTING)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_BUCKETING_SORTING)) { transformations.add(new BucketingSortingReduceSinkOptimizer()); } transformations.add(new UnionProcessor()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.NWAYJOINREORDER)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.N_WAY_JOIN_REORDER)) { transformations.add(new JoinReorder()); } if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.TEZ_OPTIMIZE_BUCKET_PRUNING) - && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD) - && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER)) { + && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_PPD) + && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER)) { final boolean compatMode = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT); transformations.add(new FixedBucketPruningOptimizer(compatMode)); @@ -194,31 +194,31 @@ public void initialize(HiveConf hiveConf) { transformations.add(new BucketVersionPopulator()); - if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION) && + if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION) && !isTezExecEngine) { transformations.add(new ReduceSinkDeDuplication()); } transformations.add(new NonBlockingOpDeDupProc()); - if (HiveConf.getBoolVar(hiveConf, 
HiveConf.ConfVars.HIVEIDENTITYPROJECTREMOVER) + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_IDENTITY_PROJECT_REMOVER) && !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP)) { transformations.add(new IdentityProjectRemover()); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVELIMITOPTENABLE)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_LIMIT_OPT_ENABLE)) { transformations.add(new GlobalLimitOptimizer()); } - if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCORRELATION) && - !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEGROUPBYSKEW) && + if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_CORRELATION) && + !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_GROUPBY_SKEW) && !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME) && !isTezExecEngine) { transformations.add(new CorrelationOptimizer()); } - if (HiveConf.getFloatVar(hiveConf, HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE) > 0) { + if (HiveConf.getFloatVar(hiveConf, HiveConf.ConfVars.HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE) > 0) { transformations.add(new LimitPushdownOptimizer()); } if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_LIMIT)) { transformations.add(new OrderlessLimitPushDownOptimizer()); } - if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES)) { + if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES)) { transformations.add(new StatsOptimizer()); } if (pctx.getContext().isExplainSkipExecution() && !isTezExecEngine) { @@ -226,11 +226,11 @@ public void initialize(HiveConf hiveConf) { transformations.add(new AnnotateWithOpTraits()); } - if (!HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVEFETCHTASKCONVERSION).equals("none")) { + if (!HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION).equals("none")) { transformations.add(new SimpleFetchOptimizer()); // must be called last } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEFETCHTASKAGGR)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_FETCH_TASK_AGGR)) { transformations.add(new SimpleFetchAggregation()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java index 1201418bcccc..d3764dcc2365 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java @@ -399,10 +399,10 @@ private static GroupByOperator createGroupBy(SelectOperator selectOp, Operator stack, ReduceSinkOperator sink = (ReduceSinkOperator) nd; ReduceSinkDesc desc = sink.getConf(); - long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); - int maxReducers = context.conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); - int constantReducers = context.conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); + long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER); + int maxReducers = context.conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); + int constantReducers = context.conf.getIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS); if (context.visitedReduceSinks.contains(sink)) { // skip walking the children diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java index 83f6d9e7226a..fb1f6a1c7952 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java @@ -131,7 +131,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { private FetchTask optimize(ParseContext pctx, String alias, TableScanOperator source) throws Exception { String mode = HiveConf.getVar( - pctx.getConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION); + pctx.getConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION); boolean aggressive = "more".equals(mode); final int limit = pctx.getQueryProperties().getOuterQueryLimit(); @@ -144,7 +144,7 @@ private FetchTask optimize(ParseContext pctx, String alias, TableScanOperator so FetchWork fetchWork = fetch.convertToWork(); FetchTask fetchTask = (FetchTask) TaskFactory.get(fetchWork); fetchTask.setCachingEnabled(HiveConf.getBoolVar(pctx.getConf(), - HiveConf.ConfVars.HIVEFETCHTASKCACHING)); + HiveConf.ConfVars.HIVE_FETCH_TASK_CACHING)); fetchWork.setSink(fetch.completed(pctx, fetchWork)); fetchWork.setSource(source); fetchWork.setLimit(limit); @@ -154,7 +154,7 @@ private FetchTask optimize(ParseContext pctx, String alias, TableScanOperator so } private boolean checkThreshold(FetchData data, int limit, ParseContext pctx) throws Exception { - boolean cachingEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVEFETCHTASKCACHING); + boolean cachingEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CACHING); if (!cachingEnabled) { if (limit > 0) { if (data.hasOnlyPruningFilter()) { @@ -176,7 +176,7 @@ private boolean checkThreshold(FetchData data, int limit, ParseContext pctx) thr } // if caching is enabled we apply the treshold in all cases long threshold = HiveConf.getLongVar(pctx.getConf(), - HiveConf.ConfVars.HIVEFETCHTASKCONVERSIONTHRESHOLD); + HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION_THRESHOLD); if (threshold < 0) { return true; } @@ -209,7 +209,7 @@ private FetchData checkTree(boolean aggressive, ParseContext pctx, String alias, } boolean bypassFilter = false; - if (HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVEOPTPPD)) { + if (HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_OPT_PPD)) { ExprNodeDesc pruner = pctx.getOpToPartPruner().get(ts); if (PartitionPruner.onlyContainsPartnCols(table, pruner)) { bypassFilter = !pctx.getPrunedPartitions(alias, ts).hasUnknownPartitions(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java index b57ddd8e6c4e..cebb937c1209 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java @@ -836,7 +836,7 @@ private ArrayList getPositionsToExprNodes(List pos, private boolean shouldDo(List partitionPos, Operator fsParent) { int threshold = HiveConf.getIntVar(this.parseCtx.getConf(), - HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD); + HiveConf.ConfVars.HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD); long MAX_WRITERS = -1; switch (threshold) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java index cbfb749fc628..e8cec2fceefc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java @@ -55,7 +55,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // and sort merge bucketed mapjoin cannot be performed if (!convert && pGraphContext.getConf().getBoolVar( - HiveConf.ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN)) { + HiveConf.ConfVars.HIVE_ENFORCE_SORT_MERGE_BUCKET_MAPJOIN)) { throw new SemanticException(ErrorMsg.SORTMERGE_MAPJOIN_FAILED.getMsg()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java index 5b6570a1bad2..7e7c1489cc1c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java @@ -82,7 +82,7 @@ private RelMetadataProvider init(HiveConf hiveConf, List>(); - trustScript = pctx.getConf().getBoolVar(HIVESCRIPTOPERATORTRUST); - minReducer = pctx.getConf().getIntVar(HIVEOPTREDUCEDEDUPLICATIONMINREDUCER); - isMapAggr = pctx.getConf().getBoolVar(HIVEMAPSIDEAGGREGATE); + trustScript = pctx.getConf().getBoolVar(HIVE_SCRIPT_OPERATOR_TRUST); + minReducer = pctx.getConf().getIntVar(HIVE_OPT_REDUCE_DEDUPLICATION_MIN_REDUCER); + isMapAggr = pctx.getConf().getBoolVar(HIVE_MAPSIDE_AGGREGATE); this.pctx = pctx; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java index 43870562a3d7..19a2295a0c10 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java @@ -178,7 +178,7 @@ private void findPossibleAutoConvertedJoinOperators() throws SemanticException { } long ThresholdOfSmallTblSizeSum = HiveConf.getLongVar(pCtx.getConf(), - HiveConf.ConfVars.HIVESMALLTABLESFILESIZE); + HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE); for (int i = 0; i < numAliases; i++) { // this table cannot be big table if (!bigTableCandidates.contains(i)) { @@ -212,7 +212,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { pCtx = pctx; - if (HiveConf.getBoolVar(pCtx.getConf(),HiveConf.ConfVars.HIVECONVERTJOIN)) { + if (HiveConf.getBoolVar(pCtx.getConf(),HiveConf.ConfVars.HIVE_CONVERT_JOIN)) { findPossibleAutoConvertedJoinOperators(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java index dc5c97d5a32f..51af5b7cd161 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hive.ql.optimizer.correlation; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVECONVERTJOIN; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASK; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_CONVERT_JOIN; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONALTASK; import java.util.ArrayList; import java.util.LinkedHashMap; @@ -73,10 +73,10 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { 
ReduceSinkDeduplicateProcCtx cppCtx = new ReduceSinkDeduplicateProcCtx(pGraphContext); // for auto convert map-joins, it not safe to dedup in here (todo) - boolean mergeJoins = !pctx.getConf().getBoolVar(HIVECONVERTJOIN) && - !pctx.getConf().getBoolVar(HIVECONVERTJOINNOCONDITIONALTASK) && + boolean mergeJoins = !pctx.getConf().getBoolVar(HIVE_CONVERT_JOIN) && + !pctx.getConf().getBoolVar(HIVE_CONVERT_JOIN_NOCONDITIONALTASK) && !pctx.getConf().getBoolVar(ConfVars.HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ) && - !pctx.getConf().getBoolVar(ConfVars.HIVEDYNAMICPARTITIONHASHJOIN); + !pctx.getConf().getBoolVar(ConfVars.HIVE_DYNAMIC_PARTITION_HASHJOIN); // If multiple rules can be matched with same cost, last rule will be choosen as a processor // see DefaultRuleDispatcher#dispatch() @@ -329,7 +329,7 @@ public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY, start, ReduceSinkOperator.class, dedupCtx.trustScript()); if (pRS != null && ReduceSinkDeDuplicationUtils .merge(dedupCtx.getPctx().getConf(), cRS, pRS, dedupCtx.minReducer())) { - if (dedupCtx.getPctx().getConf().getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (dedupCtx.getPctx().getConf().getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { return false; } CorrelationUtilities.removeReduceSinkForGroupBy(cRS, cGBY, dedupCtx.getPctx(), dedupCtx); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java index 9327c643d768..27e56dfead17 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hive.ql.optimizer.correlation; -import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.Map.Entry; @@ -33,14 +32,12 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.optimizer.correlation.ReduceSinkDeDuplication.ReduceSinkDeduplicateProcCtx; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.ColStatistics; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; -import org.apache.hadoop.hive.ql.plan.Statistics; import org.apache.hadoop.hive.ql.plan.TableDesc; import com.google.common.collect.ImmutableList; @@ -149,7 +146,7 @@ public static boolean merge(HiveConf hiveConf, ReduceSinkOperator cRS, ReduceSin // child RS but Sorting order of the child RS is more specific than // that of the parent RS. throw new SemanticException("Sorting columns and order don't match. 
" + - "Try set " + HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION + "=false;"); + "Try set " + HiveConf.ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION + "=false;"); } pRS.getConf().setOrder(cRS.getConf().getOrder()); pRS.getConf().setNullOrder(cRS.getConf().getNullOrder()); @@ -210,7 +207,7 @@ private static long estimateReducers(HiveConf conf, ReduceSinkOperator rs) { if (rs.getConf().getNumReducers() > 0) { return rs.getConf().getNumReducers(); } - int constantReducers = conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); + int constantReducers = conf.getIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS); if (constantReducers > 0) { return constantReducers; } @@ -221,8 +218,8 @@ private static long estimateReducers(HiveConf conf, ReduceSinkOperator rs) { inputTotalBytes = StatsUtils.safeAdd(inputTotalBytes, sibling.getStatistics().getDataSize()); } } - int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); - long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); + int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); + long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER); return Utilities.estimateReducers(inputTotalBytes, bytesPerReducer, maxReducers, false); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java index 28e8d12dcead..86dcb10b557c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java @@ -143,7 +143,7 @@ private long calculateLocalTableTotalSize(MapredLocalWork localWork) { * Check if the total size of local tables will be under * the limit after we merge localWork1 and localWork2. * The limit of the total size of local tables is defined by - * HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD. + * HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD. * @param conf * @param localWorks * @return @@ -152,7 +152,7 @@ private boolean isLocalTableTotalSizeUnderLimitAfterMerge( Configuration conf, MapredLocalWork... localWorks) { final long localTableTotalSizeLimit = HiveConf.getLongVar(conf, - HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); + HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD); long localTableTotalSize = 0; for (int i = 0; i < localWorks.length; i++) { final long localWorkTableTotalSize = calculateLocalTableTotalSize(localWorks[i]); @@ -166,7 +166,7 @@ private boolean isLocalTableTotalSizeUnderLimitAfterMerge( if (localTableTotalSize > localTableTotalSizeLimit) { // The total size of local tables after we merge localWorks // is larger than the limit set by - // HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD. + // HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD. 
return false; } @@ -431,12 +431,12 @@ public Task processCurrentTask(MapRedTask currTask, // If sizes of at least n-1 tables in a n-way join is known, and their sum is smaller than // the threshold size, convert the join into map-join and don't create a conditional task boolean convertJoinMapJoin = HiveConf.getBoolVar(conf, - HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASK); + HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONALTASK); int bigTablePosition = -1; if (convertJoinMapJoin) { // This is the threshold that the user has specified to fit in mapjoin long mapJoinSize = HiveConf.getLongVar(conf, - HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); + HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD); Long bigTableSize = null; Set aliases = aliasToWork.keySet(); @@ -480,7 +480,7 @@ public Task processCurrentTask(MapRedTask currTask, } long ThresholdOfSmallTblSizeSum = HiveConf.getLongVar(conf, - HiveConf.ConfVars.HIVESMALLTABLESFILESIZE); + HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE); for (int pos = 0; pos < joinOp.getNumParent(); pos++) { // this table cannot be big table if (!bigTableCandidates.contains(pos)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java index 9c9dac07a671..d846428f78ae 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java @@ -147,7 +147,7 @@ public static void processSkewJoin(JoinOperator joinOp, joinDescriptor.setBigKeysDirMap(bigKeysDirMap); joinDescriptor.setSmallKeysDirMap(smallKeysDirMap); joinDescriptor.setSkewKeyDefinition(HiveConf.getIntVar(parseCtx.getConf(), - HiveConf.ConfVars.HIVESKEWJOINKEY)); + HiveConf.ConfVars.HIVE_SKEWJOIN_KEY)); HashMap> bigKeysDirToTaskMap = new HashMap>(); @@ -323,9 +323,9 @@ public static void processSkewJoin(JoinOperator joinOp, GenMRSkewJoinProcessor.class); newPlan.setNumMapTasks(HiveConf - .getIntVar(jc, HiveConf.ConfVars.HIVESKEWJOINMAPJOINNUMMAPTASK)); + .getIntVar(jc, HiveConf.ConfVars.HIVE_SKEWJOIN_MAPJOIN_NUM_MAP_TASK)); newPlan - .setMinSplitSize(HiveConf.getLongVar(jc, HiveConf.ConfVars.HIVESKEWJOINMAPJOINMINSPLIT)); + .setMinSplitSize(HiveConf.getLongVar(jc, HiveConf.ConfVars.HIVE_SKEWJOIN_MAPJOIN_MIN_SPLIT)); newPlan.setInputformat(HiveInputFormat.class.getName()); MapredWork w = new MapredWork(); @@ -366,7 +366,7 @@ public static void processSkewJoin(JoinOperator joinOp, public static boolean skewJoinEnabled(HiveConf conf, JoinOperator joinOp) { - if (conf != null && !conf.getBoolVar(HiveConf.ConfVars.HIVESKEWJOIN)) { + if (conf != null && !conf.getBoolVar(HiveConf.ConfVars.HIVE_SKEW_JOIN)) { return false; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java index 666da19e5813..2fef7f570b15 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java @@ -173,7 +173,7 @@ private void adjustAutoParallelism(BaseWork work) { clusterState.initClusterInfo(); final int targetCount; final int executorCount; - final int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); + final int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); if (!clusterState.hasClusterInfo()) { LOG.warn("Cannot 
determine LLAP cluster information"); executorCount = executorsPerNode; // assume 1 node @@ -190,7 +190,7 @@ private void adjustAutoParallelism(BaseWork work) { if (newMin < reduceWork.getMaxReduceTasks()) { reduceWork.setMinReduceTasks(newMin); reduceWork.getEdgePropRef().setAutoReduce(conf, true, newMin, - reduceWork.getMaxReduceTasks(), conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER), + reduceWork.getMaxReduceTasks(), conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER), reduceWork.getMinSrcFraction(), reduceWork.getMaxSrcFraction()); } else { reduceWork.setAutoReduceParallelism(false); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java index ca840d7f4eb4..9d8985fe62ec 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java @@ -96,7 +96,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object.. context.setFollowedByGroupBy(true); GroupByOperator groupByOp = (GroupByOperator) nd; float groupByMemoryUsage = context.getParseCtx().getConf().getFloatVar( - HiveConf.ConfVars.HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY); + HiveConf.ConfVars.HIVE_MAPJOIN_FOLLOWEDBY_MAP_AGGR_HASH_MEMORY); groupByOp.getConf().setGroupByMemoryUsage(groupByMemoryUsage); return null; } @@ -130,10 +130,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object.. float hashtableMemoryUsage; if (context.isFollowedByGroupBy()) { hashtableMemoryUsage = conf.getFloatVar( - HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE); + HiveConf.ConfVars.HIVE_HASHTABLE_FOLLOWBY_GBY_MAX_MEMORY_USAGE); } else { hashtableMemoryUsage = conf.getFloatVar( - HiveConf.ConfVars.HIVEHASHTABLEMAXMEMORYUSAGE); + HiveConf.ConfVars.HIVE_HASHTABLE_MAX_MEMORY_USAGE); } mapJoinDesc.setHashTableMemoryUsage(hashtableMemoryUsage); LOG.info("Setting max memory usage to " + hashtableMemoryUsage + " for table sink " @@ -148,7 +148,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object.. 
// todo: support tez/vectorization boolean useNontaged = conf.getBoolVar( - HiveConf.ConfVars.HIVECONVERTJOINUSENONSTAGED) && + HiveConf.ConfVars.HIVE_CONVERT_JOIN_USE_NONSTAGED) && conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("mr") && !conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java index 8e51417f12c6..14f322e55a95 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java @@ -73,9 +73,9 @@ public class MemoryCalculator implements SemanticDispatcher { public MemoryCalculator(PhysicalContext pctx) { this.pctx = pctx; - this.totalAvailableMemory = HiveConf.getLongVar(pctx.conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); - this.minimumHashTableSize = HiveConf.getIntVar(pctx.conf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS) - * HiveConf.getIntVar(pctx.conf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE); + this.totalAvailableMemory = HiveConf.getLongVar(pctx.conf, HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD); + this.minimumHashTableSize = HiveConf.getIntVar(pctx.conf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_NUM_PARTITIONS) + * HiveConf.getIntVar(pctx.conf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_WB_SIZE); this.inflationFactor = HiveConf.getFloatVar(pctx.conf, HiveConf.ConfVars.HIVE_HASH_TABLE_INFLATION_FACTOR); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java index 8903eb738107..0ed3b3589381 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java @@ -22,7 +22,6 @@ import java.util.List; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; /** @@ -46,7 +45,7 @@ public PhysicalOptimizer(PhysicalContext pctx, HiveConf hiveConf) { */ private void initialize(HiveConf hiveConf) { resolvers = new ArrayList(); - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN)) { + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN)) { resolvers.add(new CommonJoinResolver()); // The joins have been automatically converted to map-joins. 
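The PhysicalOptimizer hunks in this and the following chunk only gate resolver registration on the renamed boolean flags. As a hedged sketch — the class name, method, and resolver-name strings are invented; only the ConfVars reads mirror the patched code — this is how the renamed flags would be consulted:

```java
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;

/** Hypothetical illustration of gating physical resolvers on the renamed flags. */
final class EnabledPhysicalOptimizations {
  static List<String> list(HiveConf hiveConf) {
    List<String> enabled = new ArrayList<>();
    if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN)) {
      enabled.add("CommonJoinResolver");   // auto map-join conversion
    }
    if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SKEW_JOIN)) {
      enabled.add("SkewJoinResolver");     // runtime skew-join handling
    }
    if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES)) {
      enabled.add("MetadataOnlyOptimizer");
    }
    if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_NULL_SCAN_OPTIMIZE)) {
      enabled.add("NullScanOptimizer");
    }
    if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SAMPLING_FOR_ORDERBY)) {
      enabled.add("SamplingOptimizer");
    }
    return enabled;
  }
}
```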
@@ -56,18 +55,18 @@ private void initialize(HiveConf hiveConf) { resolvers.add(new SortMergeJoinResolver()); } } - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVESKEWJOIN)) { + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SKEW_JOIN)) { resolvers.add(new SkewJoinResolver()); } resolvers.add(new MapJoinResolver()); - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES)) { + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES)) { resolvers.add(new MetadataOnlyOptimizer()); } - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVENULLSCANOPTIMIZE)) { + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_NULL_SCAN_OPTIMIZE)) { resolvers.add(new NullScanOptimizer()); } - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVESAMPLINGFORORDERBY)) { + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SAMPLING_FOR_ORDERBY)) { resolvers.add(new SamplingOptimizer()); } @@ -91,7 +90,7 @@ private void initialize(HiveConf hiveConf) { "enable")) { resolvers.add(new Vectorizer()); } - if (!"none".equalsIgnoreCase(hiveConf.getVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE))) { + if (!"none".equalsIgnoreCase(hiveConf.getVar(HiveConf.ConfVars.HIVE_STAGE_ID_REARRANGE))) { resolvers.add(new StageIDsRearranger()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java index 54373effe23f..d67c5d720388 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java @@ -274,7 +274,7 @@ public Task processCurrentTask(MapRedTask currTask, pathToAliases, aliasToSize); long ThresholdOfSmallTblSizeSum = HiveConf.getLongVar(conf, - HiveConf.ConfVars.HIVESMALLTABLESFILESIZE); + HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE); for (int bigTablePosition = 0; bigTablePosition < numAliases; bigTablePosition++) { // this table cannot be big table diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java index 02bbf6a99511..51a79650e602 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java @@ -52,7 +52,7 @@ public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { } private static List getExplainOrder(PhysicalContext pctx) { - List tasks = getExplainOrder(pctx.getRootTasks(), pctx.getConf().getVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE)); + List tasks = getExplainOrder(pctx.getRootTasks(), pctx.getConf().getVar(HiveConf.ConfVars.HIVE_STAGE_ID_REARRANGE)); if (pctx.getFetchTask() != null) { tasks.add(pctx.getFetchTask()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index ce2e31d58800..a33e5627b4bd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -3912,7 +3912,7 @@ private boolean canSpecializeMapJoin(Operator op, MapJoi vectorMapJoinInfo.setBigTableFilterExpressions(bigTableFilterExpressions); boolean useOptimizedTable = - HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE); + HiveConf.getBoolVar(hiveConf, 
HiveConf.ConfVars.HIVE_MAPJOIN_USE_OPTIMIZED_TABLE); // Remember the condition variables for EXPLAIN regardless of whether we specialize or not. vectorDesc.setVectorMapJoinInfo(vectorMapJoinInfo); @@ -5330,7 +5330,7 @@ public Operator validateAndVectorizeOperator(Operator partitio List partNames = Hive.get().getPartitionNames( tab.getDbName(), tab.getTableName(), (short) -1); - String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); List partCols = extractPartColNames(tab); List partColTypeInfos = extractPartColTypes(tab); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index d7744587e689..416443479fa3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -1528,7 +1528,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx; HiveConf conf = aspCtx.getConf(); - long maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE); + long maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE); List aggDesc = gop.getConf().getAggregators(); Map colExprMap = gop.getColumnExprMap(); RowSchema rs = gop.getSchema(); @@ -1577,7 +1577,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // be updated to bytes per reducer (1GB default) if (top == null) { inputSize = parentStats.getDataSize(); - maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.BYTESPERREDUCER); + maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.BYTES_PER_REDUCER); } else { inputSize = top.getConf().getStatistics().getDataSize(); } @@ -1875,7 +1875,7 @@ private long getParentNumRows(GroupByOperator op, List gbyKeys, Hi /** * This method does not take into account many configs used at runtime to - * disable hash aggregation like HIVEMAPAGGRHASHMINREDUCTION. This method + * disable hash aggregation like HIVE_MAP_AGGR_HASH_MIN_REDUCTION. This method * roughly estimates the number of rows and size of each row to see if it * can fit in hashtable for aggregation. 
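The Javadoc above concerns the map-side aggregation estimate; the patched code reads HIVE_MAP_AGGR_HASH_MEMORY and HIVE_MAP_AGGR_MEMORY_THRESHOLD as floats against the container memory. A rough, assumption-laden sketch follows — the class, method, the caller-supplied container size, and the exact way the two fractions are combined are illustrative only; StatsRulesProcFactory's own arithmetic is authoritative.

```java
import org.apache.hadoop.hive.conf.HiveConf;

/** Hypothetical sketch of deriving a hash-aggregation memory budget from the renamed vars. */
final class MapAggrBudget {
  /**
   * containerMemoryBytes is supplied by the caller here; the real code obtains it from the
   * execution engine. Multiplying the two fractions is an assumption made for illustration.
   */
  static long hashTableBudgetBytes(HiveConf conf, long containerMemoryBytes) {
    float hashAggMem = conf.getFloatVar(HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY);
    float hashAggMaxThreshold = conf.getFloatVar(HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD);
    return (long) (containerMemoryBytes * hashAggMem * hashAggMaxThreshold);
  }
}
```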
* @param gop - group by operator @@ -1891,8 +1891,8 @@ private boolean checkMapSideAggregation(GroupByOperator gop, GroupByDesc.Mode mode = desc.getMode(); if (mode.equals(GroupByDesc.Mode.HASH)) { - float hashAggMem = conf.getFloatVar(HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); - float hashAggMaxThreshold = conf.getFloatVar(HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + float hashAggMem = conf.getFloatVar(HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); + float hashAggMaxThreshold = conf.getFloatVar(HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); // get available map memory in bytes long totalMemory = DagUtils.getContainerResource(conf).getMemorySize() * 1024L * 1024L; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index bc36832b6d1a..773cafd01c6c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -1197,9 +1197,9 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartit try { // get table metadata tableName = HiveTableName.withNoDefault(getUnescapedName((ASTNode)ast.getChild(0))); - boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE); + boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE); if (testMode) { - tableName = TableName.fromString(String.join("", conf.getVar(HiveConf.ConfVars.HIVETESTMODEPREFIX), + tableName = TableName.fromString(String.join("", conf.getVar(HiveConf.ConfVars.HIVE_TEST_MODE_PREFIX), tableName.getTable()), tableName.getCat(), tableName.getDb()); // not that elegant, but hard to refactor } if (ast.getToken().getType() != HiveParser.TOK_CREATETABLE && @@ -1274,7 +1274,7 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartit numStaPart = parts.size() - numDynParts; } if (numStaPart == 0 && - conf.getVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE).equalsIgnoreCase("strict")) { + conf.getVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE).equalsIgnoreCase("strict")) { throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_STRICT_MODE.getMsg()); } @@ -1613,7 +1613,7 @@ private static boolean getPartExprNodeDesc(ASTNode astNode, HiveConf conf, } TypeCheckCtx typeCheckCtx = new TypeCheckCtx(null); - String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULT_PARTITION_NAME); boolean result = true; for (Node childNode : astNode.getChildren()) { ASTNode childASTNode = (ASTNode)childNode; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index b105de8174ee..c23e94e6e165 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -506,9 +506,9 @@ public static RelOptPlanner createPlanner(HiveConf conf) { private static RelOptPlanner createPlanner( HiveConf conf, StatsSource statsSource, boolean isExplainPlan) { final Double maxSplitSize = (double) HiveConf.getLongVar( - conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE); + conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE); final Double maxMemory = (double) HiveConf.getLongVar( - conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); + conf, HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD); HiveAlgorithmsConf 
algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory); HiveRulesRegistry registry = new HiveRulesRegistry(); Properties calciteConfigProperties = new Properties(); @@ -1745,7 +1745,7 @@ protected RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataPr PerfLogger perfLogger = SessionState.getPerfLogger(); final int maxCNFNodeCount = conf.getIntVar(HiveConf.ConfVars.HIVE_CBO_CNF_NODES_LIMIT); - final int minNumORClauses = conf.getIntVar(HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN); + final int minNumORClauses = conf.getIntVar(HiveConf.ConfVars.HIVE_POINT_LOOKUP_OPTIMIZER_MIN); final boolean allowDisjunctivePredicates = conf.getBoolVar(ConfVars.HIVE_JOIN_DISJ_TRANSITIVE_PREDICATES_PUSHDOWN); final HepProgramBuilder program = new HepProgramBuilder(); @@ -1790,7 +1790,7 @@ protected RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataPr } // Run this optimization early, since it is expanding the operator pipeline. if (!conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("mr") && - conf.getBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEDISTINCTREWRITE)) { + conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_DISTINCT_REWRITE)) { // Its not clear, if this rewrite is always performant on MR, since extra map phase // introduced for 2nd MR job may offset gains of this multi-stage aggregation. // We need a cost model for MR to enable this on MR. @@ -1809,7 +1809,7 @@ protected RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataPr // 3. Run exhaustive PPD, add not null filters, transitive inference, // constant propagation, constant folding List rules = Lists.newArrayList(); - if (conf.getBoolVar(HiveConf.ConfVars.HIVEOPTPPD_WINDOWING)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPT_PPD_WINDOWING)) { rules.add(HiveFilterProjectTransposeRule.DETERMINISTIC_WINDOWING); } else { rules.add(HiveFilterProjectTransposeRule.DETERMINISTIC); @@ -1836,7 +1836,7 @@ protected RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataPr rules.add(HiveReduceExpressionsRule.SEMIJOIN_INSTANCE); rules.add(HiveAggregateReduceFunctionsRule.INSTANCE); rules.add(HiveAggregateReduceRule.INSTANCE); - if (conf.getBoolVar(HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_POINT_LOOKUP_OPTIMIZER)) { rules.add(new HivePointLookupOptimizerRule.FilterCondition(minNumORClauses)); rules.add(new HivePointLookupOptimizerRule.JoinCondition(minNumORClauses)); rules.add(new HivePointLookupOptimizerRule.ProjectionExpressions(minNumORClauses)); @@ -2156,7 +2156,7 @@ private RelNode applyJoinOrderingTransform(RelNode basePlan, RelMetadataProvider rules.add(HiveJoinProjectTransposeRule.RIGHT_PROJECT_BTW_JOIN); rules.add(HiveProjectMergeRule.INSTANCE); if (profilesCBO.contains(ExtendedCBOProfile.REFERENTIAL_CONSTRAINTS)) { - rules.add(conf.getBoolVar(HiveConf.ConfVars.HIVEOPTPPD_WINDOWING) ? + rules.add(conf.getBoolVar(HiveConf.ConfVars.HIVE_OPT_PPD_WINDOWING) ? HiveFilterProjectTransposeRule.DETERMINISTIC_WINDOWING_ON_NON_FILTERING_JOIN : HiveFilterProjectTransposeRule.DETERMINISTIC_ON_NON_FILTERING_JOIN); rules.add(HiveFilterJoinRule.FILTER_ON_NON_FILTERING_JOIN); @@ -2899,7 +2899,7 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc // 2. 
if returnpath is on and hivetestmode is on bail if (qb.getParseInfo().getTabSample(tableAlias) != null || getNameToSplitSampleMap().containsKey(tableAlias) - || (conf.getBoolVar(HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP)) && (conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE)) ) { + || (conf.getBoolVar(HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP)) && (conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE)) ) { String msg = String.format("Table Sample specified for %s." + " Currently we don't support Table Sample clauses in CBO," + " turn off cbo for queries on tableSamples.", tableAlias); @@ -3663,16 +3663,16 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException || !qbp.getDestGroupingSets().isEmpty() || !qbp.getDestCubes().isEmpty()); // 2. Sanity check - if (conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW) + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW) && qbp.getDistinctFuncExprsForClause(destClauseName).size() > 1) { throw new SemanticException(ErrorMsg.UNSUPPORTED_MULTIPLE_DISTINCTS.getMsg()); } if (cubeRollupGrpSetPresent) { - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE)) { + if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MAPSIDE_AGGREGATE)) { throw new SemanticException(ErrorMsg.HIVE_GROUPING_SETS_AGGR_NOMAPAGGR.getMsg()); } - if (conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { if (qbp.getDestGroupingSets().size() > conf .getIntVar(HiveConf.ConfVars.HIVE_NEW_JOB_GROUPING_SET_CARDINALITY)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExecuteStatementAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExecuteStatementAnalyzer.java index 029ea483a7fe..535d8d855ed3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExecuteStatementAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExecuteStatementAnalyzer.java @@ -205,9 +205,9 @@ public void analyzeInternal(ASTNode root) throws SemanticException { this.prepareQuery = false; // reset config - String queryId = this.conf.getVar(HiveConf.ConfVars.HIVEQUERYID); + String queryId = this.conf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID); this.conf.syncFromConf(cachedPlan.getQueryState().getConf()); - this.conf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId); + this.conf.setVar(HiveConf.ConfVars.HIVE_QUERY_ID, queryId); // set rest of the params this.inputs = cachedPlan.getInputs(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java index 4add301fb0ae..91cda5591981 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java @@ -259,8 +259,8 @@ private EximUtil() { */ public static URI getValidatedURI(HiveConf conf, String dcPath) throws SemanticException { try { - boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE) - || conf.getBoolVar(HiveConf.ConfVars.HIVEEXIMTESTMODE); + boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE) + || conf.getBoolVar(HiveConf.ConfVars.HIVE_EXIM_TEST_MODE); URI uri = new Path(dcPath).toUri(); FileSystem fs = FileSystem.get(uri, conf); // Get scheme from FileSystem @@ -316,8 +316,8 @@ static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws public static String relativeToAbsolutePath(HiveConf conf, String location) throws SemanticException { try { - boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE) - || 
conf.getBoolVar(HiveConf.ConfVars.HIVEEXIMTESTMODE);; + boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE) + || conf.getBoolVar(HiveConf.ConfVars.HIVE_EXIM_TEST_MODE);; if (testMode) { URI uri = new Path(location).toUri(); FileSystem fs = FileSystem.get(uri, conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java index baa31c224ea8..ddd42d1590af 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java @@ -221,7 +221,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { ctx.getCalcitePlan()); work.setAppendTaskType( - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES)); + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_EXPLAIN_DEPENDENCY_APPEND_TASK_TYPES)); ExplainTask explTask = (ExplainTask) TaskFactory.get(work); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java index 6042c0982149..6688a4256f7c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java @@ -173,7 +173,7 @@ public GenTezProcContext(HiveConf conf, ParseContext parseContext, this.inputs = inputs; this.outputs = outputs; this.currentTask = (TezTask) TaskFactory.get( - new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf)); + new TezWork(conf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID), conf)); this.leafOperatorToFollowingWork = new LinkedHashMap, BaseWork>(); this.linkOpWithWorkMap = new LinkedHashMap, Map>(); this.linkWorkWithReduceSinkMap = new LinkedHashMap>(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java index 1d2eab7e1449..a2512500e37b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.lib.*; -import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; import org.apache.hadoop.hive.ql.plan.*; import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType; @@ -96,7 +95,7 @@ public static ReduceWork createReduceWork( float maxPartitionFactor = context.conf.getFloatVar(HiveConf.ConfVars.TEZ_MAX_PARTITION_FACTOR); float minPartitionFactor = context.conf.getFloatVar(HiveConf.ConfVars.TEZ_MIN_PARTITION_FACTOR); - long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); + long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER); int defaultTinyBufferSize = context.conf.getIntVar(HiveConf.ConfVars.TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB); ReduceWork reduceWork = new ReduceWork(Utilities.REDUCENAME + context.nextSequenceNumber()); @@ -126,7 +125,7 @@ public static ReduceWork createReduceWork( if (isAutoReduceParallelism && reduceSink.getConf().getReducerTraits().contains(AUTOPARALLEL)) { // configured limit for reducers - final int maxReducers = context.conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); + final int maxReducers = context.conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); // estimated number of reducers final int 
nReducers = reduceSink.getConf().getNumReducers(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java index 736e562c1afb..e112b55031a4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java @@ -372,7 +372,7 @@ public Object process(Node nd, Stack stack, if (context.leafOperatorToFollowingWork.containsKey(operator)) { BaseWork followingWork = context.leafOperatorToFollowingWork.get(operator); - long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); + long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER); LOG.debug("Second pass. Leaf operator: "+operator +" has common downstream work: "+followingWork); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index 3a18b7af729b..ce036a2b63e8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -351,7 +351,7 @@ private void analyzeLoad(ASTNode ast) throws SemanticException { // for managed tables, make sure the file formats match if (TableType.MANAGED_TABLE.equals(ts.tableHandle.getTableType()) - && conf.getBoolVar(HiveConf.ConfVars.HIVECHECKFILEFORMAT)) { + && conf.getBoolVar(HiveConf.ConfVars.HIVE_CHECK_FILEFORMAT)) { ensureFileFormatsMatch(ts, files, fromURI); } inputs.add(toReadEntity(new Path(fromURI))); @@ -557,7 +557,7 @@ private void reparseAndSuperAnalyze(Table table, URI fromURI) throws SemanticExc // Step 3 : parse the query // Set dynamic partitioning to nonstrict so that queries do not need any partition // references. 
- HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); // Parse the rewritten query string Context rewrittenCtx; rewrittenCtx = new Context(conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java index 2a3b60e52bb6..376ba8708ca1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java @@ -185,7 +185,7 @@ private static int getNumberOfReducers(MapredWork mrwork, HiveConf conf) { return mrwork.getReduceWork().getNumReduceTasks(); } - return conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); + return conf.getIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS); } @Override @@ -200,7 +200,7 @@ protected void decideExecMode(List> rootTasks, Context ctx, // user has told us to run in local mode or doesn't want auto-local mode if (ctx.isLocalOnlyExecutionMode() || - !conf.getBoolVar(HiveConf.ConfVars.LOCALMODEAUTO)) { + !conf.getBoolVar(HiveConf.ConfVars.LOCAL_MODE_AUTO)) { return; } @@ -230,11 +230,11 @@ public boolean accept(Path file) { // estimated Input = (num_limit * max_size_per_row) * (estimated_map + 2) // long sizePerRow = HiveConf.getLongVar(conf, - HiveConf.ConfVars.HIVELIMITMAXROWSIZE); + HiveConf.ConfVars.HIVE_LIMIT_MAX_ROW_SIZE); estimatedInput = (globalLimitCtx.getGlobalOffset() + globalLimitCtx.getGlobalLimit()) * sizePerRow; long minSplitSize = HiveConf.getLongVar(conf, - HiveConf.ConfVars.MAPREDMINSPLITSIZE); + HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE); long estimatedNumMap = inputSummary.getLength() / minSplitSize + 1; estimatedInput = estimatedInput * (estimatedNumMap + 1); } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java index db959192db71..972d7997673d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java @@ -571,7 +571,7 @@ private static CalcitePlanner getAnalyzer(HiveConf conf, Context ctx) throws Sem */ public static Map> getFullPartitionSpecs( CommonTree ast, Table table, Configuration conf, boolean canGroupExprs) throws SemanticException { - String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULT_PARTITION_NAME); Map colTypes = new HashMap<>(); List partitionKeys = table.getStorageHandler() != null && table.getStorageHandler().alwaysUnpartitioned() ? table.getStorageHandler().getPartitionKeys(table) : table.getPartitionKeys(); @@ -697,7 +697,7 @@ public static ReparseResult parseRewrittenQuery(Context ctx, // Set dynamic partitioning to nonstrict so that queries do not need any partition // references. // TODO: this may be a perf issue as it prevents the optimizer.. or not - HiveConf.setVar(ctx.getConf(), HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + HiveConf.setVar(ctx.getConf(), HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); // Disable LLAP IO wrapper; doesn't propagate extra ACID columns correctly. 
HiveConf.setBoolVar(ctx.getConf(), HiveConf.ConfVars.LLAP_IO_ROW_WRAPPER_ENABLED, false); // Parse the rewritten query string diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java index f4514d700c0f..62bd84f9f912 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java @@ -56,7 +56,7 @@ import java.util.Collections; import java.util.Objects; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEQUERYID; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_QUERY_ID; import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT; import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DBNAME; import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_REPL_CONFIG; @@ -450,7 +450,7 @@ private void setConfigs(ASTNode node) throws SemanticException { for (Map.Entry config : replConfigs.entrySet()) { String key = config.getKey(); // don't set the query id in the config - if (key.equalsIgnoreCase(HIVEQUERYID.varname)) { + if (key.equalsIgnoreCase(HIVE_QUERY_ID.varname)) { String queryTag = config.getValue(); if (!StringUtils.isEmpty(queryTag)) { QueryState.setApplicationTag(conf, queryTag); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 769ab25e43e9..3c4dd24e70b6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -21,8 +21,8 @@ import static java.util.Objects.nonNull; import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_TABLE; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.DYNAMICPARTITIONCONVERT; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEARCHIVEENABLED; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.DYNAMIC_PARTITION_CONVERT; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_ARCHIVE_ENABLED; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_DEFAULT_STORAGE_HANDLER; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION; @@ -1206,7 +1206,7 @@ private String processTable(QB qb, ASTNode tabref) throws SemanticException { throw new SemanticException(generateErrorMessage((ASTNode) numerator, "Sampling percentage should be between 0 and 100")); } - int seedNum = conf.getIntVar(ConfVars.HIVESAMPLERANDOMNUM); + int seedNum = conf.getIntVar(ConfVars.HIVE_SAMPLE_RANDOM_NUM); sample = new SplitSample(percent, seedNum); } else if (type.getType() == HiveParser.TOK_ROWCOUNT) { sample = new SplitSample(Integer.parseInt(value)); @@ -1222,7 +1222,7 @@ private String processTable(QB qb, ASTNode tabref) throws SemanticException { } else if (last == 'g' || last == 'G') { length <<= 30; } - int seedNum = conf.getIntVar(ConfVars.HIVESAMPLERANDOMNUM); + int seedNum = conf.getIntVar(ConfVars.HIVE_SAMPLE_RANDOM_NUM); sample = new SplitSample(length, seedNum); } String alias_id = getAliasId(alias, qb); @@ -1270,8 +1270,8 @@ Map getNameToSplitSampleMap() { private void assertCombineInputFormat(Tree numerator, String message) throws SemanticException { String inputFormat 
= conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") ? - HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZINPUTFORMAT): - HiveConf.getVar(conf, HiveConf.ConfVars.HIVEINPUTFORMAT); + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_TEZ_INPUT_FORMAT): + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_INPUT_FORMAT); if (!inputFormat.equals(CombineHiveInputFormat.class.getName())) { throw new SemanticException(generateErrorMessage((ASTNode) numerator, message + " sampling is not supported in " + inputFormat)); @@ -1984,8 +1984,8 @@ boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1, PlannerContext plannerCtx, qb.getParseInfo().setIsAnalyzeCommand(true); qb.getParseInfo().setNoScanAnalyzeCommand(this.noscan); // Allow analyze the whole table and dynamic partitions - HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); - HiveConf.setVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); break; @@ -4298,7 +4298,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) TableDesc outInfo; TableDesc errInfo; TableDesc inInfo; - String defaultSerdeName = conf.getVar(HiveConf.ConfVars.HIVESCRIPTSERDE); + String defaultSerdeName = conf.getVar(HiveConf.ConfVars.HIVE_SCRIPT_SERDE); Class serde; try { @@ -4309,7 +4309,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) } int fieldSeparator = Utilities.tabCode; - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESCRIPTESCAPE)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SCRIPT_ESCAPE)) { fieldSeparator = Utilities.ctrlaCode; } @@ -4373,7 +4373,7 @@ private Class getRecordReader(ASTNode node) String name; if (node.getChildCount() == 0) { - name = conf.getVar(HiveConf.ConfVars.HIVESCRIPTRECORDREADER); + name = conf.getVar(HiveConf.ConfVars.HIVE_SCRIPT_RECORD_READER); } else { name = unescapeSQLString(node.getChild(0).getText()); } @@ -4390,7 +4390,7 @@ private Class getDefaultRecordReader() throws SemanticException { String name; - name = conf.getVar(HiveConf.ConfVars.HIVESCRIPTRECORDREADER); + name = conf.getVar(HiveConf.ConfVars.HIVE_SCRIPT_RECORD_READER); try { return (Class) Class.forName(name, true, @@ -4405,7 +4405,7 @@ private Class getRecordWriter(ASTNode node) String name; if (node.getChildCount() == 0) { - name = conf.getVar(HiveConf.ConfVars.HIVESCRIPTRECORDWRITER); + name = conf.getVar(HiveConf.ConfVars.HIVE_SCRIPT_RECORD_WRITER); } else { name = unescapeSQLString(node.getChild(0).getText()); } @@ -5377,13 +5377,13 @@ private Operator genGroupByPlanGroupByOperator(QBParseInfo parseInfo, genericUDAFEvaluators.put(entry.getKey(), genericUDAFEvaluator); } } - float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf - .getFloatVar(conf, ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + .getFloatVar(conf, ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); Operator op = 
putOpInsertMap(OperatorFactory.getAndMakeChild( new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations, @@ -5646,13 +5646,13 @@ private Operator genGroupByPlanGroupByOperator1(QBParseInfo parseInfo, groupByOutputRowResolver.putExpression(value, new ColumnInfo( field, udaf.returnType, "", false)); } - float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf - .getFloatVar(conf, ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + .getFloatVar(conf, ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); // Nothing special needs to be done for grouping sets if // this is the final group by operator, and multiple rows corresponding to the @@ -5827,13 +5827,13 @@ private Operator genGroupByPlanMapGroupByOperator(QB qb, genericUDAFEvaluators.put(entry.getKey(), genericUDAFEvaluator); } } - float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf - .getFloatVar(conf, ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + .getFloatVar(conf, ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( new GroupByDesc(GroupByDesc.Mode.HASH, outputColumnNames, groupByKeys, aggregations, false, groupByMemoryUsage, memoryThreshold, minReductionHashAggr, minReductionHashAggrLowerBound, @@ -6364,13 +6364,13 @@ private Operator genGroupByPlanGroupByOperator2MR(QBParseInfo parseInfo, groupByOutputRowResolver2.putExpression(value, new ColumnInfo( field, udaf.returnType, "", false)); } - float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf - .getFloatVar(conf, ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + .getFloatVar(conf, ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( new GroupByDesc(GroupByDesc.Mode.FINAL, outputColumnNames, groupByKeys, aggregations, @@ -7108,9 +7108,9 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb, if 
(enforceBucketing) { Operation acidOp = AcidUtils.isFullAcidTable(dest_tab) ? getAcidType(table_desc.getOutputFileFormatClass(), dest, AcidUtils.isInsertOnlyTable(dest_tab)) : Operation.NOT_ACID; - int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); - if (conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS) > 0) { - maxReducers = conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); + int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); + if (conf.getIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS) > 0) { + maxReducers = conf.getIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS); } int numBuckets = dest_tab.getNumBuckets(); if (numBuckets > maxReducers) { @@ -7856,8 +7856,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) cols = ct.cols; colTypes = ct.colTypes; dpCtx = new DynamicPartitionCtx(partitionColumnNames, - conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), - conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); + conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME), + conf.getIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS_PER_NODE)); qbm.setDPCtx(dest, dpCtx); isPartitioned = true; } else { @@ -8472,7 +8472,7 @@ private FileSinkDesc createFileSinkDesc(String dest, TableDesc table_desc, throw new IllegalStateException("Unexpected dest_type=" + dest_tab); } FileSinkDesc fileSinkDesc = new FileSinkDesc(queryTmpdir, table_desc, - conf.getBoolVar(HiveConf.ConfVars.COMPRESSRESULT), currentTableId, rsCtx.isMultiFileSpray(), + conf.getBoolVar(HiveConf.ConfVars.COMPRESS_RESULT), currentTableId, rsCtx.isMultiFileSpray(), canBeMerged, rsCtx.getNumFiles(), rsCtx.getTotalFiles(), rsCtx.getPartnCols(), dpCtx, dest_path, mmWriteId, isMmCtas, isInsertOverwrite, qb.getIsQuery(), qb.isCTAS() || qb.isMaterializedView(), isDirectInsert, acidOperation, @@ -8706,8 +8706,8 @@ private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab, if (dpCtx == null) { dest_tab.validatePartColumnNames(partSpec, false); dpCtx = new DynamicPartitionCtx(partSpec, - conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), - conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); + conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME), + conf.getIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS_PER_NODE)); qbm.setDPCtx(dest, dpCtx); } @@ -8720,7 +8720,7 @@ private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab, } private static void verifyDynamicPartitionEnabled(HiveConf conf, QB qb, String dest) throws SemanticException { - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP + if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING)) { // allow DP throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg())); } @@ -8757,8 +8757,8 @@ String fixCtasColumnName(String colName) { private void checkAcidConstraints() { /* LOG.info("Modifying config values for ACID write"); - conf.setBoolVar(ConfVars.HIVEOPTREDUCEDEDUPLICATION, true); - conf.setIntVar(ConfVars.HIVEOPTREDUCEDEDUPLICATIONMINREDUCER, 1); + conf.setBoolVar(ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION, true); + conf.setIntVar(ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION_MIN_REDUCER, 1); These props are now enabled elsewhere (see commit diffs). It would be better instead to throw if they are not set. For exmaple, if user has set hive.optimize.reducededuplication=false for some reason, we'll run a query contrary to what they wanted... 
But throwing now would be @@ -8872,7 +8872,7 @@ private Operator genConversionSelectOperator(String dest, QB qb, Operator input, // Check column number List tableFields = oi.getAllStructFieldRefs(); - boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING); + boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING); List rowFields = opParseCtx.get(input).getRowResolver().getColumnInfos(); int inColumnCnt = rowFields.size(); int outColumnCnt = tableFields.size(); @@ -8943,7 +8943,7 @@ private Operator genConversionSelectOperator(String dest, QB qb, Operator input, new ExprNodeColumnDesc(inputTypeInfo, inputColumn.getInternalName(), "", true); // Cast input column to destination column type if necessary. - if (conf.getBoolVar(DYNAMICPARTITIONCONVERT)) { + if (conf.getBoolVar(DYNAMIC_PARTITION_CONVERT)) { if (parts != null && !parts.isEmpty()) { String destPartitionName = dpCtx.getDPColNames().get(dpColIdx); FieldSchema destPartitionFieldSchema = parts.stream() @@ -10102,13 +10102,13 @@ private Operator genMapGroupByForSemijoin(List fields, Operator inpu } // Generate group-by operator - float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf - .getFloatVar(conf, ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + .getFloatVar(conf, ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( new GroupByDesc(GroupByDesc.Mode.HASH, outputColumnNames, groupByKeys, aggregations, false, groupByMemoryUsage, memoryThreshold, minReductionHashAggr, minReductionHashAggrLowerBound, @@ -10212,7 +10212,7 @@ private List getMapSideJoinTables(QB qb) { ASTNode hint = (ASTNode) hints.getChild(pos); if (((ASTNode) hint.getChild(0)).getToken().getType() == HintParser.TOK_MAPJOIN) { // the user has specified to ignore mapjoin hint - if (!conf.getBoolVar(HiveConf.ConfVars.HIVEIGNOREMAPJOINHINT) + if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_IGNORE_MAPJOIN_HINT) && !conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { ASTNode hintTblNames = (ASTNode) hint.getChild(1); int numCh = hintTblNames.getChildCount(); @@ -11364,7 +11364,7 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT // If we can put multiple group bys in a single reducer, determine suitable groups of // expressions, otherwise treat all the expressions as a single group - if (conf.getBoolVar(HiveConf.ConfVars.HIVEMULTIGROUPBYSINGLEREDUCER)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_MULTI_GROUPBY_SINGLE_REDUCER)) { try { commonGroupByDestGroups = getCommonGroupByDestGroups(qb, inputs); } catch (SemanticException e) { @@ -11396,8 +11396,8 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT if (commonGroupByDestGroup.size() == 1 || (qbp.getAggregationExprsForClause(firstDest).size() == 0 && getGroupByForClause(qbp, firstDest).size() == 0) || - conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW) || - 
!conf.getBoolVar(HiveConf.ConfVars.HIVEMULTIGROUPBYSINGLEREDUCER)) { + conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW) || + !conf.getBoolVar(HiveConf.ConfVars.HIVE_MULTI_GROUPBY_SINGLE_REDUCER)) { // Go over all the destination tables for (String dest : commonGroupByDestGroup) { @@ -11415,7 +11415,7 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT && (qbp.getSelForClause(dest).getToken().getType() != HiveParser.TOK_SELECTDI || qbp.getWindowingExprsForClause(dest) == null)) { // multiple distincts is not supported with skew in data - if (conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW) && + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW) && qbp.getDistinctFuncExprsForClause(dest).size() > 1) { throw new SemanticException(ErrorMsg.UNSUPPORTED_MULTIPLE_DISTINCTS. getMsg()); @@ -11440,13 +11440,13 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT qbp.setSelExprForClause(dest, genSelectDIAST(rr)); } } - if (conf.getBoolVar(HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE)) { - if (!conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_MAPSIDE_AGGREGATE)) { + if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { curr = genGroupByPlanMapAggrNoSkew(dest, qb, curr); } else { curr = genGroupByPlanMapAggr2MR(dest, qb, curr); } - } else if (conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + } else if (conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { curr = genGroupByPlan2MR(dest, qb, curr); } else { curr = genGroupByPlan1MR(dest, qb, curr); @@ -11501,13 +11501,13 @@ private Operator genPostGroupByBodyPlan(Operator curr, String dest, QB qb, || getGroupByForClause(qbp, dest).size() > 0) && qbp.getSelForClause(dest).getToken().getType() == HiveParser.TOK_SELECTDI && qbp.getWindowingExprsForClause(dest) != null) { - if (conf.getBoolVar(HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE)) { - if (!conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_MAPSIDE_AGGREGATE)) { + if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { curr = genGroupByPlanMapAggrNoSkew(dest, qb, curr); } else { curr = genGroupByPlanMapAggr2MR(dest, qb, curr); } - } else if (conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + } else if (conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { curr = genGroupByPlan2MR(dest, qb, curr); } else { curr = genGroupByPlan1MR(dest, qb, curr); @@ -12086,13 +12086,13 @@ samplePredicate, true, new SampleDesc(ts.getNumerator(), new RowSchema(rwsch.getColumnInfos()), top); } } else { - boolean testMode = conf.getBoolVar(ConfVars.HIVETESTMODE); + boolean testMode = conf.getBoolVar(ConfVars.HIVE_TEST_MODE); if (testMode) { String tabName = tab.getTableName(); // has the user explicitly asked not to sample this table String unSampleTblList = conf - .getVar(ConfVars.HIVETESTMODENOSAMPLE); + .getVar(ConfVars.HIVE_TEST_MODE_NOSAMPLE); String[] unSampleTbls = unSampleTblList.split(","); boolean unsample = false; for (String unSampleTbl : unSampleTbls) { @@ -12121,7 +12121,7 @@ samplePredicate, true, new SampleDesc(ts.getNumerator(), LOG.info("No need for sample filter"); } else { // The table is not bucketed, add a dummy filter :: rand() - int freq = conf.getIntVar(ConfVars.HIVETESTMODESAMPLEFREQ); + int freq = conf.getIntVar(ConfVars.HIVE_TEST_MODE_SAMPLE_FREQ); TableSample tsSample = new TableSample(1, freq); tsSample.setInputPruning(false); qb.getParseInfo().setTabSample(alias, tsSample); @@ -13203,7 +13203,7 @@ void analyzeInternal(ASTNode ast, Supplier 
pcf) throws SemanticE // Add the transformation that computes the lineage information. Set postExecHooks = Sets.newHashSet(Splitter.on(",").trimResults() .omitEmptyStrings() - .split(Strings.nullToEmpty(HiveConf.getVar(conf, HiveConf.ConfVars.POSTEXECHOOKS)))); + .split(Strings.nullToEmpty(HiveConf.getVar(conf, HiveConf.ConfVars.POST_EXEC_HOOKS)))); if (postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter") || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger") || postExecHooks.contains("org.apache.atlas.hive.hook.HiveHook")) { @@ -13667,7 +13667,7 @@ public void validate() throws SemanticException { LOG.debug("validated " + usedp.getName()); LOG.debug(usedp.getTable().getTableName()); - if (!AcidUtils.isTransactionalTable(tbl) && conf.getBoolVar(HIVEARCHIVEENABLED)) { + if (!AcidUtils.isTransactionalTable(tbl) && conf.getBoolVar(HIVE_ARCHIVE_ENABLED)) { // Do not check for ACID; it does not create new parts and this is expensive as hell. // TODO: add an API to get table name list for archived parts with a single call; // nobody uses this so we could skip the whole thing. @@ -14021,7 +14021,7 @@ ASTNode analyzeCreateTable( throw new SemanticException(ErrorMsg.CTAS_COLLST_COEXISTENCE.getMsg()); } if (partCols.size() != 0 || bucketCols.size() != 0) { - boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING); + boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING); if (dynPart == false) { throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg()); } else { @@ -14338,7 +14338,7 @@ ASTNode analyzeCreateTable( if(lStats != null && lStats.length != 0) { // Don't throw an exception if the target location only contains the staging-dirs for (FileStatus lStat : lStats) { - if (!lStat.getPath().getName().startsWith(HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR))) { + if (!lStat.getPath().getName().startsWith(HiveConf.getVar(conf, HiveConf.ConfVars.STAGING_DIR))) { throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location)); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java index b01be0740631..50f08ff1f4ed 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java @@ -180,9 +180,9 @@ public void processStorageFormat(String name) throws SemanticException { if (serde == null) { // RCFile supports a configurable SerDe if (name.equalsIgnoreCase(IOConstants.RCFILE)) { - serde = ensureClassExists(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTRCFILESERDE)); + serde = ensureClassExists(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_RCFILE_SERDE)); } else { - serde = ensureClassExists(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTSERDE)); + serde = ensureClassExists(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_SERDE)); } } } @@ -197,8 +197,8 @@ public void fillDefaultStorageFormat(boolean isExternal, boolean isMaterializedV HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_FILE_FORMAT); serde = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_SERDE); } else { - defaultFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT); - defaultManagedFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT); + defaultFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_FILEFORMAT); + defaultManagedFormat = 
HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_MANAGED_FILEFORMAT); } if (!isExternal && !"none".equals(defaultManagedFormat)) { @@ -211,7 +211,7 @@ public void fillDefaultStorageFormat(boolean isExternal, boolean isMaterializedV } else { processStorageFormat(defaultFormat); if (defaultFormat.equalsIgnoreCase(IOConstants.RCFILE)) { - serde = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTRCFILESERDE); + serde = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_RCFILE_SERDE); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 1095b13e1a3e..062a3fb4d344 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -261,7 +261,7 @@ public void compile(final ParseContext pCtx, // For the FetchTask, the limit optimization requires we fetch all the rows // in memory and count how many rows we get. It's not practical if the // limit factor is too big - int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH); + int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_LIMIT_OPT_MAX_FETCH); if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) { LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit + ". Doesn't qualify limit optimization."); @@ -764,12 +764,12 @@ protected abstract void generateTaskTree(List> rootTasks, ParseContext p protected void runDynPartitionSortOptimizations(ParseContext parseContext, HiveConf hConf) throws SemanticException { // run Sorted dynamic partition optimization - if(HiveConf.getBoolVar(hConf, HiveConf.ConfVars.DYNAMICPARTITIONING) && - HiveConf.getVar(hConf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE).equals("nonstrict") && - !HiveConf.getBoolVar(hConf, HiveConf.ConfVars.HIVEOPTLISTBUCKETING)) { + if(HiveConf.getBoolVar(hConf, HiveConf.ConfVars.DYNAMIC_PARTITIONING) && + HiveConf.getVar(hConf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE).equals("nonstrict") && + !HiveConf.getBoolVar(hConf, HiveConf.ConfVars.HIVE_OPT_LIST_BUCKETING)) { new SortedDynPartitionOptimizer().transform(parseContext); - if(HiveConf.getBoolVar(hConf, HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION)) { + if(HiveConf.getBoolVar(hConf, HiveConf.ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION)) { // Dynamic sort partition adds an extra RS therefore need to de-dup new ReduceSinkDeDuplication().transform(parseContext); // there is an issue with dedup logic wherein SELECT is created with wrong columns diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java index 57f1f491b315..6c17e9878893 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java @@ -195,7 +195,7 @@ protected void optimizeOperatorPlan(ParseContext pCtx) throws SemanticException } // need to run this; to get consistent filterop conditions(for operator tree matching) - if (procCtx.conf.getBoolVar(ConfVars.HIVEOPTCONSTANTPROPAGATION)) { + if (procCtx.conf.getBoolVar(ConfVars.HIVE_OPT_CONSTANT_PROPAGATION)) { new ConstantPropagate(ConstantPropagateOption.SHORTCUT).transform(procCtx.parseContext); } @@ -205,15 +205,15 @@ protected void optimizeOperatorPlan(ParseContext pCtx) throws SemanticException perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Setup stats in the operator 
plan"); // run Sorted dynamic partition optimization - if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.DYNAMICPARTITIONING) && - HiveConf.getVar(procCtx.conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE).equals("nonstrict") && - !HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVEOPTLISTBUCKETING)) { + if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING) && + HiveConf.getVar(procCtx.conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE).equals("nonstrict") && + !HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVE_OPT_LIST_BUCKETING)) { perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER); new SortedDynPartitionOptimizer().transform(procCtx.parseContext); perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Sorted dynamic partition optimization"); } - if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION)) { + if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION)) { perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER); // Dynamic sort partition adds an extra RS therefore need to de-dup new ReduceSinkDeDuplication().transform(procCtx.parseContext); @@ -233,7 +233,7 @@ protected void optimizeOperatorPlan(ParseContext pCtx) throws SemanticException new BucketVersionPopulator().transform(pCtx); perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER); - if(procCtx.conf.getBoolVar(ConfVars.HIVEOPTJOINREDUCEDEDUPLICATION)) { + if(procCtx.conf.getBoolVar(ConfVars.HIVE_OPT_JOIN_REDUCE_DEDUPLICATION)) { new ReduceSinkJoinDeDuplication().transform(procCtx.parseContext); } perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run reduce sink after join algorithm selection"); @@ -253,7 +253,7 @@ protected void optimizeOperatorPlan(ParseContext pCtx) throws SemanticException // of "and true and true" conditions. // Rather than run the full constant folding just need to shortcut AND/OR expressions // involving constant true/false values. 
- if(procCtx.conf.getBoolVar(ConfVars.HIVEOPTCONSTANTPROPAGATION)) { + if(procCtx.conf.getBoolVar(ConfVars.HIVE_OPT_CONSTANT_PROPAGATION)) { new ConstantPropagate(ConstantPropagateOption.SHORTCUT).transform(procCtx.parseContext); } @@ -481,7 +481,7 @@ private void runStatsDependentOptimizations(OptimizeTezProcContext procCtx) thro new SetReducerParallelism()); opRules.put(new RuleRegExp("Convert Join to Map-join", JoinOperator.getOperatorName() + "%"), new ConvertJoinMapJoin()); - if (procCtx.conf.getBoolVar(ConfVars.HIVEMAPAGGRHASHMINREDUCTIONSTATSADJUST)) { + if (procCtx.conf.getBoolVar(ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_STATS_ADJUST)) { opRules.put(new RuleRegExp("Set min reduction - GBy (Hash)", GroupByOperator.getOperatorName() + "%"), new SetHashGroupByMinReduction()); @@ -681,7 +681,7 @@ protected void generateTaskTree(List> rootTasks, ParseContext pCtx, for (BaseWork w : baseWorkList) { // work should be the smallest unit for memory allocation w.setReservedMemoryMB( - (int)(conf.getLongVar(ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD) / (1024 * 1024))); + (int)(conf.getLongVar(ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD) / (1024 * 1024))); } } @@ -788,13 +788,13 @@ protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, PhysicalContext physicalCtx = new PhysicalContext(conf, pCtx, pCtx.getContext(), rootTasks, pCtx.getFetchTask()); - if (conf.getBoolVar(HiveConf.ConfVars.HIVENULLSCANOPTIMIZE)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_NULL_SCAN_OPTIMIZE)) { physicalCtx = new NullScanOptimizer().resolve(physicalCtx); } else { LOG.debug("Skipping null scan query optimization"); } - if (conf.getBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES)) { physicalCtx = new MetadataOnlyOptimizer().resolve(physicalCtx); } else { LOG.debug("Skipping metadata only query optimization"); @@ -818,14 +818,14 @@ protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, LOG.debug("Skipping vectorization"); } - if (!"none".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE))) { + if (!"none".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVE_STAGE_ID_REARRANGE))) { physicalCtx = new StageIDsRearranger().resolve(physicalCtx); } else { LOG.debug("Skipping stage id rearranger"); } if ((conf.getBoolVar(HiveConf.ConfVars.HIVE_TEZ_ENABLE_MEMORY_MANAGER)) - && (conf.getBoolVar(HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN))) { + && (conf.getBoolVar(HiveConf.ConfVars.HIVE_USE_HYBRIDGRACE_HASHJOIN))) { physicalCtx = new MemoryDecider().resolve(physicalCtx); } @@ -1488,7 +1488,7 @@ private void removeSemiJoinEdgesForUnion(OptimizeTezProcContext procCtx) throws */ private void removeSemijoinsParallelToMapJoin(OptimizeTezProcContext procCtx) throws SemanticException { - if (!procCtx.conf.getBoolVar(ConfVars.HIVECONVERTJOIN)) { + if (!procCtx.conf.getBoolVar(ConfVars.HIVE_CONVERT_JOIN)) { // Not needed without mapjoin conversion return; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java index c1b9b27ff481..59f40b203bae 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java @@ -169,7 +169,7 @@ protected Task resolveMapJoinTask( Map aliasToKnownSize = ctx.getAliasToKnownSize(); Map, Set> taskToAliases = ctx.getTaskToAliases(); - long threshold 
= HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVESMALLTABLESFILESIZE); + long threshold = HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE); Long bigTableSize = null; Long smallTablesSize = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java index 0e6816ae4056..c5aecaa9cae1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java @@ -134,9 +134,9 @@ public List> getTasks(HiveConf conf, Object objCtx) { List> resTsks = new ArrayList>(); // check if a map-reduce job is needed to merge the files // If the current size is smaller than the target, merge - long trgtSize = conf.getLongVar(HiveConf.ConfVars.HIVEMERGEMAPFILESSIZE); + long trgtSize = conf.getLongVar(HiveConf.ConfVars.HIVE_MERGE_MAP_FILES_SIZE); long avgConditionSize = conf - .getLongVar(HiveConf.ConfVars.HIVEMERGEMAPFILESAVGSIZE); + .getLongVar(HiveConf.ConfVars.HIVE_MERGE_MAP_FILES_AVG_SIZE); trgtSize = Math.max(trgtSize, avgConditionSize); Task mvTask = ctx.getListTasks().get(0); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicValue.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicValue.java index 04129a6adaa5..033df4e9ceff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicValue.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicValue.java @@ -24,10 +24,7 @@ import org.apache.hadoop.hive.ql.exec.DynamicValueRegistry; import org.apache.hadoop.hive.ql.exec.ObjectCache; import org.apache.hadoop.hive.ql.exec.ObjectCacheFactory; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.sarg.LiteralDelegate; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; @@ -113,7 +110,7 @@ public Object getValue() { try { // Get object cache - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); ObjectCache cache = ObjectCacheFactory.getCache(conf, queryId, false, true); if (cache == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java index 738ace050600..d5117fe768cf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.MemoryMonitorInfo; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectSerDeContext; -import org.apache.hadoop.hive.ql.optimizer.signature.Signature; import org.apache.hadoop.hive.ql.plan.Explain.Level; import org.apache.hadoop.hive.ql.plan.Explain.Vectorization; import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableImplementationType; @@ -531,7 +530,7 @@ private VectorizationCondition[] createNativeConditions() { conditionList.add( new VectorizationCondition( vectorMapJoinDesc.getUseOptimizedTable(), - HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE.varname)); + HiveConf.ConfVars.HIVE_MAPJOIN_USE_OPTIMIZED_TABLE.varname)); conditionList.add( new 
VectorizationCondition( enabled, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 61d3da32a3f6..188ec71a57ea 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -1026,7 +1026,7 @@ public static String stripQuotes(String val) { * @return */ public static String removePrefixFromWarehouseConfig(String origiKey) { - String prefix = SessionState.get().getConf().getVar(HiveConf.ConfVars.METASTOREWAREHOUSE); + String prefix = SessionState.get().getConf().getVar(HiveConf.ConfVars.METASTORE_WAREHOUSE); if ((prefix != null) && (prefix.length() > 0)) { //Local file system is using pfile:/// {@link ProxyLocalFileSystem} prefix = prefix.replace("pfile:///", "pfile:/"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java index b5fd1814c2d6..90a7ff727a10 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java @@ -405,7 +405,7 @@ private static void extractFinalCandidates(ExprNodeDesc expr, exprInfo.convertedExpr : expr); return; } else if (!FunctionRegistry.isOpAnd(expr) && - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { ctx.addNonFinalCandidate(exprInfo != null ? exprInfo.alias : null, expr); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java index fa7f5710f6fd..01bb077daab7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java @@ -174,7 +174,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, OpWalkerInfo owi = (OpWalkerInfo) procCtx; ExprWalkerInfo childInfo = getChildWalkerInfo((Operator) nd, owi); if (childInfo != null && HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { ExprWalkerInfo unpushedPreds = mergeChildrenPred(nd, owi, null, false); return createFilter((Operator)nd, unpushedPreds, owi); } @@ -214,7 +214,7 @@ private void pushRankLimit(PTFOperator ptfOp, OpWalkerInfo owi) throws SemanticE return; } - float threshold = owi.getParseContext().getConf().getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE); + float threshold = owi.getParseContext().getConf().getFloatVar(HiveConf.ConfVars.HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE); if (threshold <= 0 || threshold >= 1) { return; } @@ -356,7 +356,7 @@ private void pushRankLimitToRedSink(PTFOperator ptfOp, HiveConf conf, int rLimit return; } - float threshold = conf.getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE); + float threshold = conf.getFloatVar(HiveConf.ConfVars.HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE); ReduceSinkOperator rSink = (ReduceSinkOperator) gP; ReduceSinkDesc rDesc = rSink.getConf(); @@ -392,7 +392,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object o = super.process(nd, stack, procCtx, nodeOutputs); OpWalkerInfo owi = (OpWalkerInfo) procCtx; if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { 
// The lateral view join is allowed to have a filter pushed through it. // We need to remove the filter candidate here once it has been applied. // If we do not remove it here, the candidates will be cleared out through @@ -440,7 +440,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, TableScanOperator tsOp = (TableScanOperator) nd; mergeWithChildrenPred(tsOp, owi, null, null); if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { // remove all the candidate filter operators // when we get to the TS removeAllCandidates(owi); @@ -495,7 +495,7 @@ Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, logExpr(nd, ewi); owi.putPrunedPreds((Operator) nd, ewi); if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { // add this filter for deletion, if it does not have non-final candidates owi.addCandidateFilterOp((FilterOperator)op); Map> residual = ewi.getResidualPredicates(true); @@ -505,7 +505,7 @@ Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // merge it with children predicates boolean hasUnpushedPredicates = mergeWithChildrenPred(nd, owi, ewi, null); if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { if (hasUnpushedPredicates) { ExprWalkerInfo unpushedPreds = mergeChildrenPred(nd, owi, null, false); return createFilter((Operator)nd, unpushedPreds, owi); @@ -611,7 +611,7 @@ protected Set getAliases(Node nd) throws SemanticException { protected Object handlePredicates(Node nd, ExprWalkerInfo prunePreds, OpWalkerInfo owi) throws SemanticException { if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { return createFilter((Operator)nd, prunePreds.getResidualPredicates(true), owi); } return null; @@ -685,7 +685,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, if (operator.getNumChild() == 1 && operator.getChildOperators().get(0) instanceof JoinOperator) { if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDRECOGNIZETRANSITIVITY)) { + HiveConf.ConfVars.HIVE_PPD_RECOGNIZE_TRANSITIVITY)) { JoinOperator child = (JoinOperator) operator.getChildOperators().get(0); int targetPos = child.getParentOperators().indexOf(operator); applyFilterTransitivity(child, targetPos, owi); @@ -724,7 +724,7 @@ private void applyFilterTransitivity(JoinOperator join, int targetPos, OpWalkerI ExprWalkerInfo rsPreds = owi.getPrunedPreds(target); boolean recogniseColumnEqualities = HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPD_RECOGNIZE_COLUMN_EQUALITIES); + HiveConf.ConfVars.HIVE_PPD_RECOGNIZE_COLUMN_EQUALITIES); for (int sourcePos = 0; sourcePos < parentOperators.size(); sourcePos++) { ReduceSinkOperator source = (ReduceSinkOperator) parentOperators.get(sourcePos); List sourceKeys = source.getConf().getKeyCols(); @@ -1095,7 +1095,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Set includes = getQualifiedAliases((Operator) nd, owi); boolean hasUnpushedPredicates = mergeWithChildrenPred(nd, owi, null, includes); if (hasUnpushedPredicates && HiveConf.getBoolVar(owi.getParseContext().getConf(), - 
HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { if (includes != null || nd instanceof ReduceSinkOperator) { owi.getCandidateFilterOps().clear(); } else { @@ -1257,7 +1257,7 @@ protected static Object createFilter(Operator op, boolean pushFilterToStorage; HiveConf hiveConf = owi.getParseContext().getConf(); pushFilterToStorage = - hiveConf.getBoolVar(HiveConf.ConfVars.HIVEOPTPPD_STORAGE); + hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_OPT_PPD_STORAGE); if (pushFilterToStorage) { condn = pushFilterToStorageHandler( (TableScanOperator) op, @@ -1288,7 +1288,7 @@ protected static Object createFilter(Operator op, } if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { // remove the candidate filter ops removeCandidates(op, owi); } @@ -1326,7 +1326,7 @@ private static ExprNodeGenericFuncDesc pushFilterToStorageHandler( TableScanDesc tableScanDesc = tableScanOp.getConf(); Table tbl = tableScanDesc.getTableMetadata(); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER)) { // attach the original predicate to the table scan operator for index // optimizations that require the pushed predicate before pcr & later // optimizations are applied diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java index fbaabe06f258..87547116fed1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java @@ -234,7 +234,7 @@ public Map getHiveVariable() { return ss.getHiveVariables(); } }).substitute(conf, varValue); - if (conf.getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_CONF_VALIDATION)) { HiveConf.ConfVars confVars = HiveConf.getConfVars(key); if (confVars != null) { if (!confVars.isType(value)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java b/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java index d3e6a4722c27..cfa50d5e6369 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java @@ -227,7 +227,7 @@ private void processQuery(ScheduledQueryPollResponse q) { conf.set(Constants.SCHEDULED_QUERY_SCHEDULENAME, q.getScheduleKey().getScheduleName()); conf.set(Constants.SCHEDULED_QUERY_USER, q.getUser()); conf.set(Constants.SCHEDULED_QUERY_EXECUTIONID, Long.toString(q.getExecutionId())); - conf.unset(HiveConf.ConfVars.HIVESESSIONID.varname); + conf.unset(HiveConf.ConfVars.HIVE_SESSION_ID.varname); state = new SessionState(conf, q.getUser()); state.setIsHiveServerQuery(true); SessionState.start(state); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java index 71a0d22bd6b2..eac288bd88d2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java @@ -221,15 +221,15 @@ public 
void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPl if (sessionCtx.getClientType() == HiveAuthzSessionContext.CLIENT_TYPE.HIVESERVER2 && hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) { - // Configure PREEXECHOOKS with DisallowTransformHook to disallow transform queries - String hooks = hiveConf.getVar(HiveConf.ConfVars.PREEXECHOOKS).trim(); + // Configure PRE_EXEC_HOOKS with DisallowTransformHook to disallow transform queries + String hooks = hiveConf.getVar(HiveConf.ConfVars.PRE_EXEC_HOOKS).trim(); if (hooks.isEmpty()) { hooks = DisallowTransformHook.class.getName(); } else { hooks = hooks + "," + DisallowTransformHook.class.getName(); } LOG.debug("Configuring hooks : " + hooks); - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, hooks); + hiveConf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, hooks); SettableConfigUpdater.setHiveConfWhiteList(hiveConf); String curBlackList = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_BUILTIN_UDF_BLACKLIST); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java index 12ebba63acb4..f1203dc36610 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java @@ -628,15 +628,15 @@ public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPl if (sessionCtx.getClientType() == CLIENT_TYPE.HIVESERVER2 && hiveConf.getBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED)) { - // Configure PREEXECHOOKS with DisallowTransformHook to disallow transform queries - String hooks = hiveConf.getVar(ConfVars.PREEXECHOOKS).trim(); + // Configure PRE_EXEC_HOOKS with DisallowTransformHook to disallow transform queries + String hooks = hiveConf.getVar(ConfVars.PRE_EXEC_HOOKS).trim(); if (hooks.isEmpty()) { hooks = DisallowTransformHook.class.getName(); } else { hooks = hooks + "," + DisallowTransformHook.class.getName(); } LOG.debug("Configuring hooks : " + hooks); - hiveConf.setVar(ConfVars.PREEXECHOOKS, hooks); + hiveConf.setVar(ConfVars.PRE_EXEC_HOOKS, hooks); SettableConfigUpdater.setHiveConfWhiteList(hiveConf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java b/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java index 62105dcec5a6..576a38d19603 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java @@ -19,11 +19,9 @@ import java.io.File; import java.io.IOException; -import java.io.OutputStream; import java.util.ArrayList; import java.util.List; -import com.google.common.annotations.VisibleForTesting; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; @@ -101,7 +99,7 @@ public static void main(String[] args) throws Exception { if (cli.hasOption("s")) { rootHDFSDir = cli.getOptionValue("s"); } else { - rootHDFSDir = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR); + rootHDFSDir = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR); } ClearDanglingScratchDir clearDanglingScratchDirMain = new ClearDanglingScratchDir(dryRun, verbose, true, rootHDFSDir, conf); @@ -186,7 +184,7 @@ public void run() { return; } consoleMessage("Removing " 
+ scratchDirToRemove.size() + " scratch directories"); - String localTmpDir = HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR); + String localTmpDir = HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR); for (Path scratchDir : scratchDirToRemove) { if (dryRun) { System.out.println(scratchDir); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index cb7ed48a58a5..3ba5fb3361f1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -391,7 +391,7 @@ public void deleteTmpErrOutputFile() { public boolean getIsSilent() { if(sessionConf != null) { - return sessionConf.getBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT); + return sessionConf.getBoolVar(HiveConf.ConfVars.HIVE_SESSION_SILENT); } else { return isSilent; } @@ -407,7 +407,7 @@ public boolean isHiveServerQuery() { public void setIsSilent(boolean isSilent) { if(sessionConf != null) { - sessionConf.setBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT, isSilent); + sessionConf.setBoolVar(HiveConf.ConfVars.HIVE_SESSION_SILENT, isSilent); } this.isSilent = isSilent; } @@ -463,13 +463,13 @@ public SessionState(HiveConf conf, String userName, CleanupService cleanupServic if (LOG.isDebugEnabled()) { LOG.debug("SessionState user: " + userName); } - isSilent = conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT); + isSilent = conf.getBoolVar(HiveConf.ConfVars.HIVE_SESSION_SILENT); resourceMaps = new ResourceMaps(); // Must be deterministic order map for consistent q-test output across Java versions overriddenConfigurations = new LinkedHashMap(); // if there isn't already a session name, go ahead and create it. - if (StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HIVESESSIONID))) { - conf.setVar(HiveConf.ConfVars.HIVESESSIONID, makeSessionId()); + if (StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HIVE_SESSION_ID))) { + conf.setVar(HiveConf.ConfVars.HIVE_SESSION_ID, makeSessionId()); getConsole().printInfo("Hive Session ID = " + getSessionId()); } // Using system classloader as the parent. Using thread context @@ -500,7 +500,7 @@ public void setHiveVariables(Map hiveVariables) { } public String getSessionId() { - return (sessionConf.getVar(HiveConf.ConfVars.HIVESESSIONID)); + return (sessionConf.getVar(HiveConf.ConfVars.HIVE_SESSION_ID)); } public void updateThreadName() { @@ -781,14 +781,14 @@ private void createSessionDirs(String userName) throws IOException { HiveConf conf = getConf(); Path rootHDFSDirPath = createRootHDFSDir(conf); // Now create session specific dirs - String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION); + String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR_PERMISSION); Path path; // 1. HDFS scratch dir path = new Path(rootHDFSDirPath, userName); hdfsScratchDirURIString = path.toUri().toString(); createPath(conf, path, scratchDirPermission, false, false); // 2. Local scratch dir - path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); createPath(conf, path, scratchDirPermission, true, false); // 3. 
Download resources dir path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR)); @@ -812,7 +812,7 @@ private void createSessionDirs(String userName) throws IOException { hdfsSessionPathLockFile = fs.create(new Path(hdfsSessionPath, LOCK_FILE_NAME), true); } // 6. Local session path - localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), sessionId); + localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR), sessionId); createPath(conf, localSessionPath, scratchDirPermission, true, true); conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString()); // 7. HDFS temp table space @@ -837,7 +837,7 @@ private void createSessionDirs(String userName) throws IOException { * @throws IOException */ private Path createRootHDFSDir(HiveConf conf) throws IOException { - Path rootHDFSDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + Path rootHDFSDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); Utilities.ensurePathIsWritable(rootHDFSDirPath, conf); return rootHDFSDirPath; } @@ -1077,8 +1077,8 @@ public void setActiveAuthorizer(Object authorizer) { * @throws IOException */ private static File createTempFile(HiveConf conf) throws IOException { - String lScratchDir = HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR); - String sessionID = conf.getVar(HiveConf.ConfVars.HIVESESSIONID); + String lScratchDir = HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR); + String sessionID = conf.getVar(HiveConf.ConfVars.HIVE_SESSION_ID); return FileUtils.createTempFile(lScratchDir, sessionID, ".pipeout"); } @@ -1462,10 +1462,10 @@ public void loadAuxJars() throws IOException { public void loadReloadableAuxJars() throws IOException { LOG.info("Reloading auxiliary JAR files"); - final String renewableJarPath = sessionConf.getVar(ConfVars.HIVERELOADABLEJARS); + final String renewableJarPath = sessionConf.getVar(ConfVars.HIVE_RELOADABLE_JARS); // do nothing if this property is not specified or empty if (StringUtils.isBlank(renewableJarPath)) { - LOG.warn("Configuration {} not specified", ConfVars.HIVERELOADABLEJARS); + LOG.warn("Configuration {} not specified", ConfVars.HIVE_RELOADABLE_JARS); return; } @@ -2113,7 +2113,7 @@ public void setupQueryCurrentTimestamp() { // Provide a facility to set current timestamp during tests if (sessionConf.getBoolVar(ConfVars.HIVE_IN_TEST)) { String overrideTimestampString = - HiveConf.getVar(sessionConf, HiveConf.ConfVars.HIVETESTCURRENTTIMESTAMP, (String)null); + HiveConf.getVar(sessionConf, HiveConf.ConfVars.HIVE_TEST_CURRENT_TIMESTAMP, (String)null); if (overrideTimestampString != null && overrideTimestampString.length() > 0) { TimestampTZ zonedDateTime = TimestampTZUtil.convert( Timestamp.valueOf(overrideTimestampString), sessionConf.getLocalTimeZone()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java index 723f64f594c7..244d5cd0b710 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java @@ -231,7 +231,7 @@ public void run() { } ThreadPoolExecutor tpE = null; List> futures = null; - int numThreadsFactor = HiveConf.getIntVar(jc, HiveConf.ConfVars.BASICSTATSTASKSMAXTHREADSFACTOR); + int numThreadsFactor = HiveConf.getIntVar(jc, HiveConf.ConfVars.BASIC_STATS_TASKS_MAX_THREADS_FACTOR); if (fileList.size() > 1 && numThreadsFactor 
> 0) { int numThreads = Math.min(fileList.size(), numThreadsFactor * Runtime.getRuntime().availableProcessors()); ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("Basic-Stats-Thread-%d").build(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java index e2777a128bc4..e6f945bc864f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java @@ -148,7 +148,7 @@ private boolean constructColumnStatsFromPackedRows(Table tbl, List op : baseWork.getAllOperators()) { String operatorId = op.getOperatorId(); OperatorStats operatorStats = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index 1ba231546b46..239f57b69b3e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -605,7 +605,7 @@ public static ColStatistics getColStatsForPartCol(ColumnInfo ci,PartitionIterabl partCS.setAvgColLen(StatsUtils.getAvgColLenOf(conf, ci.getObjectInspector(), partCS.getColumnType())); partCS.setRange(getRangePartitionColumn(partList, ci.getInternalName(), - ci.getType().getTypeName(), conf.getVar(ConfVars.DEFAULTPARTITIONNAME))); + ci.getType().getTypeName(), conf.getVar(ConfVars.DEFAULT_PARTITION_NAME))); return partCS; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java index 5bf1c0bffb14..3443ea204558 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java @@ -425,7 +425,7 @@ private void setColumnTypes(JobConf job, List cols) { } job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, colNames.toString()); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, colTypes.toString()); - HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + HiveConf.setVar(job, HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); } public JobConf getMrJob() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/CompactionService.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/CompactionService.java index 814ed9cd046b..9d1885ad9c3a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/CompactionService.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/CompactionService.java @@ -68,8 +68,8 @@ protected boolean isDynPartAbort(Table t, CompactionInfo ci) { } protected void failCompactionIfSetForTest() { - if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION)) { - throw new RuntimeException(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION.name() + "=true"); + if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION)) { + throw new RuntimeException(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION.name() + "=true"); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRegExp.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRegExp.java index 8522abd0ee68..63fbcc0e7af1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRegExp.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRegExp.java @@ -64,7 +64,7 @@ public class GenericUDFRegExp extends GenericUDF { @Override public void configure(MapredContext context) { if (context != null) { - if(HiveConf.getBoolVar(context.getJobConf(), HiveConf.ConfVars.HIVEUSEGOOGLEREGEXENGINE)){ + if(HiveConf.getBoolVar(context.getJobConf(), HiveConf.ConfVars.HIVE_USE_GOOGLE_REGEX_ENGINE)){ this.useGoogleRegexEngine=true; } } @@ -75,7 +75,7 @@ public void configure(MapredContext context) { public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { SessionState ss = SessionState.get(); if (ss != null) { - this.useGoogleRegexEngine = ss.getConf().getBoolVar(HiveConf.ConfVars.HIVEUSEGOOGLEREGEXENGINE); + this.useGoogleRegexEngine = ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_USE_GOOGLE_REGEX_ENGINE); } checkArgsSize(arguments, 2, 2); @@ -93,7 +93,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen regexConst = getConstantStringValue(arguments, 1); if (regexConst != null) { if(!useGoogleRegexEngine){ - //if(!HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVEUSEGOOGLEREGEXENGINE)){ + //if(!HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_USE_GOOGLE_REGEX_ENGINE)){ patternConst = Pattern.compile(regexConst); }else{ patternConstR2j = com.google.re2j.Pattern.compile(regexConst); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java index 97d04676e76b..b1c3b767deca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java @@ -238,7 +238,7 @@ protected SplitResult getSplitResult(boolean generateLightWeightSplits) + ConfVars.LLAP_HS2_ENABLE_COORDINATOR.varname + " enabled"); } ApplicationId extClientAppId = coordinator.createExtClientAppId(); - String externalDagName = SessionState.get().getConf().getVar(ConfVars.HIVEQUERYNAME); + String externalDagName = SessionState.get().getConf().getVar(ConfVars.HIVE_QUERY_NAME); StringBuilder sb = new StringBuilder(); sb.append("Generated appID ").append(extClientAppId.toString()).append(" for LLAP splits"); @@ -271,8 +271,8 @@ private PlanFragment createPlanFragment(String query, ApplicationId splitsAppId) throws HiveException { HiveConf conf = new HiveConf(SessionState.get().getConf()); - HiveConf.setVar(conf, ConfVars.HIVEFETCHTASKCONVERSION, "none"); - HiveConf.setVar(conf, ConfVars.HIVEQUERYRESULTFILEFORMAT, PlanUtils.LLAP_OUTPUT_FORMAT_KEY); + HiveConf.setVar(conf, ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); + HiveConf.setVar(conf, ConfVars.HIVE_QUERY_RESULT_FILEFORMAT, PlanUtils.LLAP_OUTPUT_FORMAT_KEY); String originalMode = HiveConf.getVar(conf, ConfVars.HIVE_EXECUTION_MODE); @@ -451,7 +451,7 @@ private SplitResult getSplits(JobConf job, TezWork work, Schema schema, Applicat // Update the queryId to use the generated extClientAppId. See comment below about // why this is done. 
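The setter side of the rename has the same shape as the getters; a small illustrative sketch (the query id below is a placeholder, not the external client application id that GenericUDTFGetSplits actually generates):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class SplitConfOverrideSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Disable the fetch-task shortcut and tag the fragment with an explicit query id,
        // mirroring the overrides applied before planning an LLAP split query.
        HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none");
        HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, "external-app-id-placeholder"); // placeholder value
        System.out.println(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID));
      }
    }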
- HiveConf.setVar(wxConf, HiveConf.ConfVars.HIVEQUERYID, extClientAppId.toString()); + HiveConf.setVar(wxConf, HiveConf.ConfVars.HIVE_QUERY_ID, extClientAppId.toString()); Vertex wx = utils.createVertex(wxConf, mapWork, scratchDir, work, DagUtils.createTezLrMap(appJarLr, null)); String vertexName = wx.getName(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java index 8bf51e5b8c3c..fb342e27153d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java @@ -213,7 +213,7 @@ private boolean streamingPossible(Configuration cfg, WindowFunctionDef wFnDef) return true; } - int windowLimit = HiveConf.getIntVar(cfg, ConfVars.HIVEJOINCACHESIZE); + int windowLimit = HiveConf.getIntVar(cfg, ConfVars.HIVE_JOIN_CACHE_SIZE); if (windowLimit < (start.getAmt() + end.getAmt() + 1)) { return false; @@ -271,7 +271,7 @@ private int[] setCanAcceptInputAsStream(Configuration cfg) throws HiveException return null; } - int windowLimit = HiveConf.getIntVar(cfg, ConfVars.HIVEJOINCACHESIZE); + int windowLimit = HiveConf.getIntVar(cfg, ConfVars.HIVE_JOIN_CACHE_SIZE); if (windowLimit < (endPos - startPos + 1)) { return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java index 1dd9d8bf9db9..1d4b8fa22d03 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java @@ -574,7 +574,7 @@ private void readControlConfigs(FileSystem fs, Path path) { }); if (runOptions.shouldModifyManagedTableLocation || runOptions.shouldMoveExternal) { Configuration oldConf = new Configuration(conf); - HiveConf.setVar(oldConf, HiveConf.ConfVars.METASTOREWAREHOUSE, runOptions.oldWarehouseRoot); + HiveConf.setVar(oldConf, HiveConf.ConfVars.METASTORE_WAREHOUSE, runOptions.oldWarehouseRoot); oldWh = ThreadLocal.withInitial(() -> { try { @@ -650,7 +650,7 @@ static WarehouseRootCheckResult checkOldWarehouseRoot(RunOptions runOptions, Hiv shouldMoveExternal = false; } else { String currentPathString = shouldModifyManagedTableLocation ? - HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE) : + HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_WAREHOUSE) : HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL); if (arePathsEqual(conf, runOptions.oldWarehouseRoot, currentPathString)) { LOG.info("oldWarehouseRoot is the same as the target path {}." diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java b/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java index 67df6a7bcec4..8871bb54e349 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java @@ -175,7 +175,7 @@ private static IMetaStoreClient getHMS(HiveConf conf) { return RetryingMetaStoreClient.getProxy(conf, true); } catch (MetaException e) { throw new RuntimeException("Error connecting to Hive Metastore URI: " - + conf.getVar(HiveConf.ConfVars.METASTOREURIS) + ". " + e.getMessage(), e); + + conf.getVar(HiveConf.ConfVars.METASTORE_URIS) + ". 
" + e.getMessage(), e); } } /** diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java index 4f9f1d8e2fcb..8de9cbe93e10 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java @@ -1469,9 +1469,9 @@ public void showLocks() throws Exception { * 5.1.30 * * 2. Hack in the c'tor of this class - * conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, "jdbc:mysql://localhost/metastore"); + * conf.setVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY, "jdbc:mysql://localhost/metastore"); * conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "hive"); - * conf.setVar(HiveConf.ConfVars.METASTOREPWD, "hive"); + * conf.setVar(HiveConf.ConfVars.METASTORE_PWD, "hive"); * conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); * 3. Remove TxnDbUtil.prepDb(); in TxnHandler.checkQFileTestHack() * diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java index 1e2fe3303afe..46c8e8244564 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java @@ -65,7 +65,6 @@ import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.TxnState; -import org.apache.hadoop.hive.metastore.api.AbortCompactResponse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; @@ -123,7 +122,7 @@ void initHiveConf() { //TestTxnCommandsWithSplitUpdateAndVectorization has the vectorized version //of these tests. 
HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false); - HiveConf.setVar(hiveConf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + HiveConf.setVar(hiveConf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_DROP_PARTITION_USE_BASE, false); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_RENAME_PARTITION_MAKE_COPY, false); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_CREATE_TABLE_USE_SUFFIX, false); @@ -163,9 +162,9 @@ public void testInsertOverwrite() throws Exception { List rs = runStatementOnDriver("select a from " + Table.ACIDTBL + " where b = 2"); Assert.assertEquals(1, rs.size()); Assert.assertEquals("1", rs.get(0)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert overwrite table " + Table.ACIDTBL + " values(3,2)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); runStatementOnDriver("insert into " + Table.ACIDTBL + " values(5,6)"); rs = runStatementOnDriver("select a from " + Table.ACIDTBL + " order by a"); Assert.assertEquals(2, rs.size()); @@ -1463,9 +1462,9 @@ public void testMoreBucketsThanReducers() throws Exception { // todo: try using set VerifyNumReducersHook.num.reducers=10; d.destroy(); HiveConf hc = new HiveConf(hiveConf); - hc.setIntVar(HiveConf.ConfVars.MAXREDUCERS, 1); + hc.setIntVar(HiveConf.ConfVars.MAX_REDUCERS, 1); //this is used in multiple places, SemanticAnalyzer.getBucketingSortingDest() among others - hc.setIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS, 1); + hc.setIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS, 1); hc.setBoolVar(HiveConf.ConfVars.HIVE_EXPLAIN_USER, false); d = new Driver(hc); d.setMaxRows(10000); @@ -1483,9 +1482,9 @@ public void testMoreBucketsThanReducers2() throws Exception { //see bucket_num_reducers.q bucket_num_reducers2.q d.destroy(); HiveConf hc = new HiveConf(hiveConf); - hc.setIntVar(HiveConf.ConfVars.MAXREDUCERS, 2); + hc.setIntVar(HiveConf.ConfVars.MAX_REDUCERS, 2); //this is used in multiple places, SemanticAnalyzer.getBucketingSortingDest() among others - hc.setIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS, 2); + hc.setIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS, 2); d = new Driver(hc); d.setMaxRows(10000); runStatementOnDriver("create table fourbuckets (a int, b int) clustered by (a) into 4 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); @@ -2589,15 +2588,15 @@ public void testFetchTaskCachingWithConversion() throws Exception { runStatementOnDriver("insert into table fetch_task_table values (1,2), (3,4), (5,6)"); List expectedRes = runStatementOnDriver("select * from fetch_task_table"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEFETCHTASKCACHING, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CACHING, true); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); d.run("select * from fetch_task_table"); Assert.assertFalse(d.getFetchTask().isCachingEnabled()); d.getFetchTask().fetch(actualRes); Assert.assertEquals(actualRes, expectedRes); actualRes.clear(); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "more"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "more"); d.run("select * from fetch_task_table"); 
Assert.assertTrue(d.getFetchTask().isCachingEnabled()); d.getFetchTask().fetch(actualRes); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java index b96ec876db20..9b2edfa10f57 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java @@ -159,7 +159,7 @@ void initHiveConf() { HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false); //TestTxnCommands2WithAbortCleanupUsingCompactionCycle has the tests with abort cleanup in compaction cycle MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEAN_ABORTS_USING_CLEANER, true); - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, false); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_TRUNCATE_USE_BASE, false); } @@ -204,8 +204,8 @@ public void testOrcNoPPD() throws Exception { * @throws Exception */ private void testOrcPPD(boolean enablePPD) throws Exception { - boolean originalPpd = hiveConf.getBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, enablePPD);//enables ORC PPD + boolean originalPpd = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER, enablePPD);//enables ORC PPD //create delta_0001_0001_0000 (should push predicate here) runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(new int[][]{{1, 2}, {3, 4}})); List explain; @@ -263,7 +263,7 @@ private void testOrcPPD(boolean enablePPD) throws Exception { List rs1 = runStatementOnDriver(query); int [][] resultData = new int[][] {{3, 5}, {5, 6}, {9, 10}}; Assert.assertEquals("Update failed", stringifyValues(resultData), rs1); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, originalPpd); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER, originalPpd); } static void assertExplainHasString(String string, List queryPlan, String errMsg) { @@ -1076,15 +1076,15 @@ public void testValidTxnsBookkeeping() throws Exception { @Test public void testSimpleRead() throws Exception { - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "more"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "more"); int[][] tableData = {{1,2},{3,3}}; runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(tableData)); int[][] tableData2 = {{5,3}}; //this will cause next txn to be marked aborted but the data is still written to disk - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(tableData2)); assert hiveConf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) == null : "previous txn should've cleaned it"; - //so now if HIVEFETCHTASKCONVERSION were to use a stale value, it would use a + //so now if HIVE_FETCH_TASK_CONVERSION were to use a stale value, it would use a //ValidWriteIdList with HWM=MAX_LONG, i.e. 
include the data for aborted txn List rs = runStatementOnDriver("select * from " + Table.ACIDTBL); Assert.assertEquals("Extra data", 2, rs.size()); @@ -1225,7 +1225,7 @@ void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tb //generate enough delta files so that Initiator can trigger auto compaction runStatementOnDriver("insert into " + tblName + " values(" + (i + 1) + ", 'foo'),(" + (i + 2) + ", 'bar'),(" + (i + 3) + ", 'baz')"); } - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEANER_ON, true); @@ -1267,7 +1267,7 @@ void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tb MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE)), countCompacts(txnHandler)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, false); txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR)); //at this point "show compactions" should have (COMPACTOR_HISTORY_RETENTION_FAILED) failed + 1 initiated (explicitly by user) checkCompactionState(new CompactionsByState( @@ -1563,9 +1563,9 @@ private void writeBetweenWorkerAndCleanerForVariousTblProperties(String tblPrope runWorker(hiveConf); //delete something, but make sure txn is rolled back - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("delete from " + tblName + " where a = 1"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); List expected = new ArrayList<>(); expected.add("1\tfoo"); @@ -1622,7 +1622,7 @@ private void writeBetweenWorkerAndCleanerForVariousTblProperties(String tblPrope public void testFailHeartbeater() throws Exception { // Fail heartbeater, so that we can get a RuntimeException from the query. // More specifically, it's the original IOException thrown by either MR's or Tez's progress monitoring loop. 
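Reviewer aside, not part of the patch: in the heartbeater hunk below, the asserted substring changes along with the constant, presumably because the failure-injection path reports the enum constant's Java name; if the message were built from a plain string literal instead, that literal would need the same rename in this commit. A hypothetical restatement of the assertion under that assumption:

    // assumes the injected failure message embeds ConfVars.name(), i.e. "HIVE_TEST_MODE_FAIL_HEARTBEATER=true"
    String expectedToken = HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_HEARTBEATER.name() + "=true";
    Assert.assertTrue(exception.getMessage().contains(expectedToken));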
- hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_HEARTBEATER, true); Exception exception = null; try { runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(new int[][]{{1, 2}, {3, 4}})); @@ -1630,7 +1630,7 @@ public void testFailHeartbeater() throws Exception { exception = e; } Assert.assertNotNull(exception); - Assert.assertTrue(exception.getMessage().contains("HIVETESTMODEFAILHEARTBEATER=true")); + Assert.assertTrue(exception.getMessage().contains("HIVE_TEST_MODE_FAIL_HEARTBEATER=true")); } @Test @@ -1692,9 +1692,9 @@ public void testCompactWithDelete() throws Exception { public void testNoHistory() throws Exception { int[][] tableData = {{1,2},{3,4}}; runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'"); runWorker(hiveConf); @@ -1764,7 +1764,7 @@ protected void testACIDwithSchemaEvolutionForVariousTblProperties(String tblProp } protected void createAbortLowWaterMark() throws Exception{ - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("select * from " + Table.ACIDTBL); // wait for metastore.txn.opentxn.timeout Thread.sleep(1000); @@ -1774,7 +1774,7 @@ protected void createAbortLowWaterMark() throws Exception{ @Test public void testETLSplitStrategyForACID() throws Exception { hiveConf.setVar(HiveConf.ConfVars.HIVE_ORC_SPLIT_STRATEGY, "ETL"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER, true); runStatementOnDriver("insert into " + Table.ACIDTBL + " values(1,2)"); runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'MAJOR'"); runWorker(hiveConf); @@ -2437,9 +2437,9 @@ public void testCleanerForTxnToWriteId() throws Exception { int[][] tableData5 = {{5, 6}}; runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=3) (a,b) " + makeValuesClause(tableData3)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData4)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // Keep an open txn which refers to the aborted txn. Context ctx = new Context(hiveConf); @@ -2448,9 +2448,9 @@ public void testCleanerForTxnToWriteId() throws Exception { txnMgr.getValidTxns(); // Start an INSERT statement transaction and roll back this transaction. 
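Reviewer aside, not part of the patch: HIVE_TEST_MODE_ROLLBACK_TXN is flipped around single statements many times in this file, always with the on/off pair shown right after this note. A hedged sketch of the same pattern with a try/finally guard, so the flag cannot leak if the statement throws; `runStatementOnDriver` and `Table.ACIDTBL` are the test class's existing helpers:

    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true);
    try {
      // the data is written to disk, but the transaction is forced to abort instead of committing
      runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(5,6)");
    } finally {
      hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false);
    }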
- hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData5)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData5)); @@ -2505,9 +2505,9 @@ public void testMmTableAbortWithCompaction() throws Exception { Assert.assertEquals("1", r1.get(0)); // 2. Let a transaction be aborted - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.MMTBL + "(a,b) values(3,4)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // There should be 1 delta and 1 base directory. The base one is the aborted one. verifyDeltaDirAndResult(2, Table.MMTBL.toString(), "", resultData1); @@ -2541,9 +2541,9 @@ public void testMmTableAbortWithCompaction() throws Exception { // 7. add few more rows runStatementOnDriver("insert into " + Table.MMTBL + "(a,b) values(7,8)"); // 8. add one more aborted delta - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.MMTBL + "(a,b) values(9,10)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // 9. Perform a MAJOR compaction, expectation is it should remove aborted base dir runStatementOnDriver("alter table "+ Table.MMTBL + " compact 'MAJOR'"); @@ -2575,9 +2575,9 @@ public void testMmTableAbortWithCompactionNoCleanup() throws Exception { Assert.assertEquals("2", r1.get(0)); // 2. Let a transaction be aborted - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.MMTBL + " values(3,4)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // There should be 1 delta and 1 base directory. The base one is the aborted one. verifyDeltaDirAndResult(3, Table.MMTBL.toString(), "", resultData1); r1 = runStatementOnDriver("select count(*) from " + Table.MMTBL); @@ -2597,9 +2597,9 @@ public void testMmTableAbortWithCompactionNoCleanup() throws Exception { // 4. add few more rows runStatementOnDriver("insert into " + Table.MMTBL + "(a,b) values(7,8)"); // 5. add one more aborted delta - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.MMTBL + "(a,b) values(9,10)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); verifyDeltaDirAndResult(5, Table.MMTBL.toString(), "", resultData3); // 6. 
Perform a MAJOR compaction, expectation is it should remove aborted delta dir @@ -2629,9 +2629,9 @@ public void testDynPartInsertWithAborts() throws Exception { verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=p1", resultData); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("insert into " + Table.ACIDTBLPART + " partition(p) values(3,3,'p1'),(4,4,'p1')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p1", resultData); int count = TestTxnDbUtil @@ -2666,10 +2666,10 @@ public void testDynPartInsertWithMultiPartitionAborts() throws Exception { Assert.assertEquals("4", r1.get(0)); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("insert into " + Table.ACIDTBLPART + " partition(p) values(3,3,'p1'),(4,4,'p1')"); runStatementOnDriverWithAbort("insert into " + Table.ACIDTBLPART + " partition(p) values(3,3,'p2'),(4,4,'p2')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p1", resultData); verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p2", resultData); r1 = runStatementOnDriver("select count(*) from " + Table.ACIDTBLPART); @@ -2709,9 +2709,9 @@ public void testDynPartIOWWithAborts() throws Exception { verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=p1", resultData); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("insert overwrite table " + Table.ACIDTBLPART + " partition(p) values(3,3,'p1'),(4,4,'p1')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=p1", resultData); verifyBaseDir(1, Table.ACIDTBLPART.toString(), "p=p1"); @@ -2748,10 +2748,10 @@ public void testDynPartIOWWithMultiPartitionAborts() throws Exception { Assert.assertEquals("4", r1.get(0)); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("insert overwrite table " + Table.ACIDTBLPART + " partition(p) values(3,3,'p1'),(4,4,'p1')"); runStatementOnDriverWithAbort("insert overwrite table " + Table.ACIDTBLPART + " partition(p) values(3,3,'p2'),(4,4,'p2')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=p1", resultData); 
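Reviewer aside, not part of the patch: the dynamic-partition tests in this region use the renamed HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION flag to abort a write before addDynamicPartitions registers the new partitions. A condensed sketch under the same assumptions (`runStatementOnDriverWithAbort`, `Table.ACIDTBLPART`, and the verify helpers are the test class's own fixtures):

    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true);
    try {
      // delta/base files are written, but the txn aborts before the partitions are registered
      runStatementOnDriverWithAbort("insert overwrite table " + Table.ACIDTBLPART
          + " partition(p) values(3,3,'p1'),(4,4,'p1')");
    } finally {
      hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false);
    }
    // the aborted directories stay on disk; the surrounding asserts inspect them via verifyDeltaDirAndResult/verifyBaseDir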
verifyBaseDir(1, Table.ACIDTBLPART.toString(), "p=p1"); verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=p2", resultData); @@ -2796,9 +2796,9 @@ public void testDynPartUpdateWithAborts() throws Exception { verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p1", resultData1); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("update " + Table.ACIDTBLPART + " set b=a+2 where a<5"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p1", resultData1); verifyDeleteDeltaDir(1, Table.ACIDTBLPART.toString(), "p=p1"); @@ -2836,12 +2836,12 @@ public void testDynPartMergeWithAborts() throws Exception { runStatementOnDriver("insert into " + TestTxnCommands2.Table.NONACIDORCTBL + " " + makeValuesClause(sourceVals)); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("merge into " + Table.ACIDTBLPART + " using " + TestTxnCommands2.Table.NONACIDORCTBL + " as s ON " + Table.ACIDTBLPART + ".a = s.a " + "when matched then update set b = s.b " + "when not matched then insert values(s.a, s.b, 'newpart')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p1", resultData); verifyDeleteDeltaDir(1, Table.ACIDTBLPART.toString(), "p=p1"); verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=newpart", resultData); @@ -2886,9 +2886,9 @@ public void testFullACIDAbortWithMinorMajorCompaction() throws Exception { Assert.assertEquals("1", r1.get(0)); // 2. Let a transaction be aborted - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(3,4)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // There should be 2 delta directories. verifyDeltaDirAndResult(2, Table.ACIDTBL.toString(), "", resultData1); @@ -2921,9 +2921,9 @@ public void testFullACIDAbortWithMinorMajorCompaction() throws Exception { runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(7,8)"); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(9,10)"); // 7. add one more aborted delta - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(11,12)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // 8. 
Perform a MAJOR compaction runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'"); @@ -2953,9 +2953,9 @@ public void testFullACIDAbortWithMajorCompaction() throws Exception { Assert.assertEquals("2", r1.get(0)); // 2. Let a transaction be aborted - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(5,6)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // There should be 2 delta and 1 base directory. The base one is the aborted one. verifyDeltaDirAndResult(3, Table.ACIDTBL.toString(), "", resultData1); r1 = runStatementOnDriver("select count(*) from " + Table.ACIDTBL); @@ -2987,9 +2987,9 @@ public void testFullACIDAbortWithCompactionNoCleanup() throws Exception { verifyDeltaDirAndResult(2, Table.ACIDTBL.toString(), "", resultData1); // 2. abort one txns - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(5,6)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); verifyDeltaDirAndResult(3, Table.ACIDTBL.toString(), "", resultData1); // 3. Perform a MAJOR compaction. @@ -3019,14 +3019,14 @@ public void testFullACIDAbortWithManyPartitions() throws Exception { verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p3", resultData1); // 2. abort two txns in each partition - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p1') (a,b) values(5,6)"); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p2') (a,b) values(5,6)"); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p3') (a,b) values(5,6)"); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p1') (a,b) values(5,6)"); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p2') (a,b) values(5,6)"); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p3') (a,b) values(5,6)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); verifyDeltaDirAndResult(4, Table.ACIDTBLPART.toString(), "p=p1", resultData1); verifyDeltaDirAndResult(4, Table.ACIDTBLPART.toString(), "p=p2", resultData1); verifyDeltaDirAndResult(4, Table.ACIDTBLPART.toString(), "p=p3", resultData1); @@ -3398,7 +3398,7 @@ public void testCompactionOutputDirectoryNamesOnPartitionsAndOldDeltasDeleted() public void testShowCompactionOrder() throws Exception { d.destroy(); - hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + hiveConf.setVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); d = new Driver(hiveConf); //generate some compaction history runStatementOnDriver("drop database if exists mydb1 cascade"); @@ -3434,7 +3434,7 @@ public void testShowCompactionOrder() throws Exception { runStatementOnDriver("insert into T values(1,4)");//makes delta_2_2 in T2 //create failed compaction attempt so that compactor txn is aborted - 
HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); runStatementOnDriver("alter table T compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); // Verify compaction order @@ -3454,7 +3454,7 @@ public void testShowCompactionOrder() throws Exception { public void testAbortCompaction() throws Exception { d.destroy(); - hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + hiveConf.setVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); d = new Driver(hiveConf); //generate some compaction history runStatementOnDriver("drop database if exists mydb1 cascade"); @@ -3491,7 +3491,7 @@ public void testAbortCompaction() throws Exception { runStatementOnDriver("insert into myT1 values(1,4)");//makes delta_2_2 in T2 //create failed compaction attempt so that compactor txn is aborted - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); runStatementOnDriver("alter table myT1 compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); // Verify compaction order diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java index c5a2639bdafc..b3fe87a8be4d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java @@ -426,7 +426,7 @@ public void testCompactionAbort() throws Exception { runStatementOnDriver("insert into T values(1,4)");//makes delta_2_2 in T2 //create failed compaction attempt so that compactor txn is aborted - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); runStatementOnDriver("alter table T compact 'minor'"); runWorker(hiveConf); @@ -456,7 +456,7 @@ public void testCompactionAbort() throws Exception { Assert.assertEquals(1, TestTxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_COMPONENTS where TC_WRITEID=" + highestCompactWriteId)); //now make a successful compactor run so that next Cleaner run actually cleans - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, false); runStatementOnDriver("alter table T compact 'minor'"); runWorker(hiveConf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java index ba2b75cd9b07..5b243d2022b1 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.junit.Assert; @@ -88,9 +87,9 @@ public void setUp() throws Exception { } void setUpInternalExtended(boolean isOrcFormat) throws Exception { - hiveConf.setBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, 
"none"); + hiveConf.setBoolVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING, true); + hiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "true"); hiveConf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java index 95cf36f6e3ba..9b7fab9ac704 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java @@ -584,9 +584,9 @@ public void testMMExportAborted() throws Exception { runStatementOnDriver("create table T (a int, b int)"); runStatementOnDriver("create table Tstage (a int, b int)"); - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into Tstage" + TestTxnCommands2.makeValuesClause(dataAbort)); - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); runStatementOnDriver("insert into Tstage" + TestTxnCommands2.makeValuesClause(data)); runStatementOnDriver("export table Tstage to '" + getWarehouseDir() + "/1'"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java index a00886bb9cd9..7409179f4368 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java @@ -19,10 +19,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -41,7 +38,6 @@ import org.slf4j.LoggerFactory; import java.io.File; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -311,7 +307,7 @@ public void testCtasEmpty() throws Exception { @Test public void testInsertToAcidWithUnionRemove() throws Exception { hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); d.close(); d = new Driver(hiveConf); int[][] values = {{1,2},{3,4},{5,6},{7,8},{9,10}}; @@ -350,7 +346,7 @@ public void testInsertToAcidWithUnionRemove() throws Exception { @Test public void testInsertOverwriteToAcidWithUnionRemove() throws Exception { hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); d.close(); d = new Driver(hiveConf); int[][] values = {{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9, 10}}; @@ -374,9 +370,9 @@ public void testInsertOverwriteToAcidWithUnionRemove() throws Exception { @Test public void testToAcidConversionMultiBucket() throws Exception { //need to disable these so 
that automatic merge doesn't merge the files - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPFILES, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPREDFILES, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_MERGE_MAPFILES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_MERGE_MAPRED_FILES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_MERGE_TEZ_FILES, false); d.close(); d = new Driver(hiveConf); @@ -404,7 +400,7 @@ public void testToAcidConversionMultiBucket() throws Exception { //now do Insert from Union here to create data files in sub dirs hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); d.close(); d = new Driver(hiveConf); runStatementOnDriver("insert into T(a,b) select a * 10, b * 10 from " + Table.ACIDTBL + @@ -665,7 +661,7 @@ public void testCtasPartitioned() throws Exception { @Test public void testNonAcidToAcidVectorzied() throws Exception { hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); //this enables vectorization of ROW__ID hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ROW_IDENTIFIER_ENABLED, true);//HIVE-12631 runStatementOnDriver("drop table if exists T"); @@ -773,7 +769,7 @@ private void checkExpected(List rs, String[][] expected, String msg) { */ @Test public void testCompactStatsGather() throws Exception { - hiveConf.setIntVar(HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD, -1); + hiveConf.setIntVar(HiveConf.ConfVars.HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD, -1); runStatementOnDriver("drop table if exists T"); runStatementOnDriver("create table T(a int, b int) partitioned by (p int, q int) " + "stored as orc TBLPROPERTIES ('transactional'='true')"); @@ -916,7 +912,7 @@ public void testEmptyCompactionResult() throws Exception { */ @Test public void testGetPartitionsNoSession() throws Exception { - hiveConf.setIntVar(HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD, -1); + hiveConf.setIntVar(HiveConf.ConfVars.HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD, -1); runStatementOnDriver("drop table if exists T"); runStatementOnDriver("create table T(a int, b int) partitioned by (p int, q int) " + "stored as orc TBLPROPERTIES ('transactional'='true')"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java b/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java index 3a9b0cb754cd..b7acefc531c4 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java @@ -119,10 +119,10 @@ void setUpInternal() throws Exception { + File.separator + "mapred" + File.separator + "staging"); hiveConf.set("mapred.temp.dir", workDir + File.separator + this.getClass().getSimpleName() + File.separator + "mapred" + File.separator + "temp"); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getWarehouseDir()); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + 
hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, getWarehouseDir()); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java index 801133d85c61..00a58c4cea6d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java @@ -89,8 +89,8 @@ public class TestExecDriver { try { queryState = new QueryState.Builder().withHiveConf(new HiveConf(ExecDriver.class)).build(); conf = queryState.getConf(); - conf.setBoolVar(HiveConf.ConfVars.SUBMITVIACHILD, true); - conf.setBoolVar(HiveConf.ConfVars.SUBMITLOCALTASKVIACHILD, true); + conf.setBoolVar(HiveConf.ConfVars.SUBMIT_VIA_CHILD, true); + conf.setBoolVar(HiveConf.ConfVars.SUBMIT_LOCAL_TASK_VIA_CHILD, true); conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java index 2ea15cf4924f..099c12bd686c 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java @@ -210,7 +210,7 @@ public void testGetJSONDependenciesJsonShhouldMatch() throws Exception { @Test public void testGetJSONPlan() throws Exception { - uut.conf.setVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE, "EXECUTION"); + uut.conf.setVar(HiveConf.ConfVars.HIVE_STAGE_ID_REARRANGE, "EXECUTION"); Task mockTask = mockTask(); when(mockTask.getId()).thenReturn("mockTaskId"); ExplainWork explainWorkMock = mockExplainWork(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestLimitOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestLimitOperator.java index 681435c65c8b..0c5679b223ec 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestLimitOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestLimitOperator.java @@ -64,7 +64,7 @@ private void testGlobalLimitReachedInDaemonOrContainer(boolean isDaemon, int off } HiveConf conf = new HiveConf(); - HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYID, "query-" + random.nextInt(10000)); + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, "query-" + random.nextInt(10000)); HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "tez"); conf.set(TezProcessor.HIVE_TEZ_VERTEX_NAME, "Map 1"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java index c82fdf3a1d9a..dcf0483cf057 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java @@ -200,11 +200,11 @@ public void testScriptOperatorEnvVarsProcessing() throws Throwable { hconf.set("name", hugeEnvVar); Map env = new HashMap(); - HiveConf.setBoolVar(hconf, HiveConf.ConfVars.HIVESCRIPTTRUNCATEENV, false); + HiveConf.setBoolVar(hconf, HiveConf.ConfVars.HIVE_SCRIPT_TRUNCATE_ENV, false); scriptOperator.addJobConfToEnvironment(hconf, env); assertEquals(20*1024+1, env.get("name").length()); - 
HiveConf.setBoolVar(hconf, HiveConf.ConfVars.HIVESCRIPTTRUNCATEENV, true); + HiveConf.setBoolVar(hconf, HiveConf.ConfVars.HIVE_SCRIPT_TRUNCATE_ENV, true); scriptOperator.addJobConfToEnvironment(hconf, env); assertEquals(20*1024, env.get("name").length()); @@ -223,7 +223,7 @@ public void testScriptOperatorBlacklistedEnvVarsProcessing() { Map env = new HashMap(); - HiveConf.setVar(hconf, HiveConf.ConfVars.HIVESCRIPT_ENV_BLACKLIST, "foobar"); + HiveConf.setVar(hconf, HiveConf.ConfVars.HIVE_SCRIPT_ENV_BLACKLIST, "foobar"); hconf.set("foobar", "foobar"); hconf.set("barfoo", "barfoo"); scriptOperator.addJobConfToEnvironment(hconf, env); @@ -423,7 +423,7 @@ public InputSplit[] getSplits(JobConf job, int splits) throws IOException { public void testFetchOperatorContext() throws Exception { HiveConf conf = new HiveConf(); conf.set("hive.support.concurrency", "false"); - conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + conf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); SessionState.start(conf); @@ -459,7 +459,7 @@ public void testNoConditionalTaskSizeForLlap() { ConvertJoinMapJoin convertJoinMapJoin = new ConvertJoinMapJoin(); long defaultNoConditionalTaskSize = 1024L * 1024L * 1024L; HiveConf hiveConf = new HiveConf(); - hiveConf.setLongVar(HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD, defaultNoConditionalTaskSize); + hiveConf.setLongVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD, defaultNoConditionalTaskSize); LlapClusterStateForCompile llapInfo = null; if ("llap".equalsIgnoreCase(hiveConf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_MODE))) { @@ -577,7 +577,7 @@ public void testLlapMemoryOversubscriptionMaxExecutorsPerQueryCalculation() { // 5. Configure hive conf and Build group by operator HiveConf hconf = new HiveConf(); - HiveConf.setIntVar(hconf, HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL, 1); + HiveConf.setIntVar(hconf, HiveConf.ConfVars.HIVE_GROUPBY_MAP_INTERVAL, 1); // 6. 
test hash aggr without grouping sets System.out.println("---------------Begin to test hash group by without grouping sets-------------"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java index 15106909734e..f87d6c40f17f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java @@ -36,7 +36,6 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; @@ -345,7 +344,7 @@ public void testGetInputPathsWithEmptyPartitions() throws Exception { List inputPaths = new ArrayList<>(); try { - Path scratchDir = new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + Path scratchDir = new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); List inputPaths1 = Utilities.getInputPaths(jobConf, mapWork1, scratchDir, mock(Context.class), false); @@ -414,7 +413,7 @@ public void testGetInputPathsWithMultipleThreadsAndEmptyPartitions() throws Exce try { fs.mkdirs(testTablePath); List inputPaths = Utilities.getInputPaths(jobConf, mapWork, - new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCALSCRATCHDIR)), mock(Context.class), false); + new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)), mock(Context.class), false); assertEquals(inputPaths.size(), numPartitions); for (int i = 0; i < numPartitions; i++) { @@ -542,7 +541,7 @@ public void testGetInputPathsWithMultipleThreads() throws Exception { private void runTestGetInputPaths(JobConf jobConf, int numOfPartitions) throws Exception { MapWork mapWork = new MapWork(); - Path scratchDir = new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + Path scratchDir = new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); Map> pathToAliasTable = new LinkedHashMap<>(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java index e95e9c32918d..404ac4a41e07 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java @@ -231,7 +231,7 @@ public void testMapAggrMemErrorHeuristic() throws Exception { String solution = eas.getSolution(); assertNotNull(solution); assertTrue(solution.length() > 0); - String confName = HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY.toString(); + String confName = HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY.toString(); assertTrue(solution.contains(confName)); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/mr/TestMapRedTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/mr/TestMapRedTask.java index 40712eb96d48..d4f983c62c03 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/mr/TestMapRedTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/mr/TestMapRedTask.java @@ -69,7 +69,7 @@ public void mrTaskSumbitViaChildWithImpersonation() throws IOException, LoginExc QueryState queryState = new QueryState.Builder().build(); HiveConf conf= queryState.getConf(); - conf.setBoolVar(HiveConf.ConfVars.SUBMITVIACHILD, true); + conf.setBoolVar(HiveConf.ConfVars.SUBMIT_VIA_CHILD, true); MapredWork mrWork = new MapredWork(); mrWork.setMapWork(Mockito.mock(MapWork.class)); diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorLimitOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorLimitOperator.java index 74a4ad0b61b9..ef2232c626ab 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorLimitOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorLimitOperator.java @@ -87,7 +87,7 @@ private void testGlobalLimitReachedInDaemonOrContainer(boolean isDaemon, int off } HiveConf conf = new HiveConf(); - HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYID, "query-" + random.nextInt(10000)); + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, "query-" + random.nextInt(10000)); HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "tez"); conf.set(TezProcessor.HIVE_TEZ_VERTEX_NAME, "Map 1"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveHooks.java b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveHooks.java index f5bc5bab5a25..b6284aff329f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveHooks.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveHooks.java @@ -61,9 +61,9 @@ public static class SemanticAnalysisHook implements HiveSemanticAnalyzerHook { @Test public void testLoadHooksFromConf() throws Exception { HiveConf hiveConf = new HiveConf(); - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, + hiveConf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, PreExecHook.class.getName() + "," + PreExecHook.class.getName()); - hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, + hiveConf.setVar(HiveConf.ConfVars.POST_EXEC_HOOKS, PostExecHook.class.getName()); hiveConf.setVar(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, SemanticAnalysisHook.class.getName()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java index f1a9a44e1fe5..e0dccc9f5834 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java @@ -55,7 +55,7 @@ public void testRedactLogString() throws Exception { HiveConf conf = new HiveConf(TestHooks.class); String str; - HiveConf.setVar(conf, HiveConf.ConfVars.QUERYREDACTORHOOKS, SimpleQueryRedactor.class.getName()); + HiveConf.setVar(conf, HiveConf.ConfVars.QUERY_REDACTOR_HOOKS, SimpleQueryRedactor.class.getName()); str = HookUtils.redactLogString(null, null); assertEquals(str, null); @@ -70,7 +70,7 @@ public void testRedactLogString() throws Exception { @Test public void testQueryRedactor() throws Exception { HiveConf conf = new HiveConf(TestHooks.class); - HiveConf.setVar(conf, HiveConf.ConfVars.QUERYREDACTORHOOKS, + HiveConf.setVar(conf, HiveConf.ConfVars.QUERY_REDACTOR_HOOKS, SimpleQueryRedactor.class.getName()); conf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java b/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java index 0adaf0a700eb..a929bb9d820f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java @@ -26,16 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.io.RCFileInputFormat; -import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; -import org.apache.hadoop.hive.ql.io.RCFileStorageFormatDescriptor; -import org.apache.hadoop.hive.ql.io.StorageFormatDescriptor; -import 
org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat; -import org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat; -import org.apache.hadoop.hive.ql.io.orc.OrcSerde; -import org.apache.hadoop.hive.serde2.avro.AvroSerDe; import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; -import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import static org.junit.Assert.assertTrue; @@ -104,9 +95,9 @@ public static Collection asParameters() { String serdeClass = descriptor.getSerde(); if (serdeClass == null) { if (descriptor instanceof RCFileStorageFormatDescriptor) { - serdeClass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTRCFILESERDE); + serdeClass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_RCFILE_SERDE); } else { - serdeClass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTSERDE); + serdeClass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_SERDE); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java index 73096b36720b..073b930531b2 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java @@ -643,7 +643,7 @@ public void testSync() throws IOException { RCFileInputFormat inputFormat = new RCFileInputFormat(); JobConf jobconf = new JobConf(cloneConf); jobconf.set("mapred.input.dir", testDir.toString()); - HiveConf.setLongVar(jobconf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, fileLen); + HiveConf.setLongVar(jobconf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, fileLen); InputSplit[] splits = inputFormat.getSplits(jobconf, 1); RCFileRecordReader rr = new RCFileRecordReader(jobconf, (FileSplit)splits[0]); long lastSync = 0; @@ -710,7 +710,7 @@ private void writeThenReadByRecordReader(int intervalRecordCount, RCFileInputFormat inputFormat = new RCFileInputFormat(); JobConf jonconf = new JobConf(cloneConf); jonconf.set("mapred.input.dir", testDir.toString()); - HiveConf.setLongVar(jonconf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, minSplitSize); + HiveConf.setLongVar(jonconf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, minSplitSize); InputSplit[] splits = inputFormat.getSplits(jonconf, splitNumber); assertEquals("splits length should be " + splitNumber, splitNumber, splits.length); int readCount = 0; @@ -796,7 +796,7 @@ public void testRCFileHeader(char[] expected, Configuration conf) @Test public void testNonExplicitRCFileHeader() throws IOException, SerDeException { Configuration conf = new Configuration(); - conf.setBoolean(HiveConf.ConfVars.HIVEUSEEXPLICITRCFILEHEADER.varname, false); + conf.setBoolean(HiveConf.ConfVars.HIVE_USE_EXPLICIT_RCFILE_HEADER.varname, false); char[] expected = new char[] {'S', 'E', 'Q'}; testRCFileHeader(expected, conf); } @@ -804,7 +804,7 @@ public void testNonExplicitRCFileHeader() throws IOException, SerDeException { @Test public void testExplicitRCFileHeader() throws IOException, SerDeException { Configuration conf = new Configuration(); - conf.setBoolean(HiveConf.ConfVars.HIVEUSEEXPLICITRCFILEHEADER.varname, true); + conf.setBoolean(HiveConf.ConfVars.HIVE_USE_EXPLICIT_RCFILE_HEADER.varname, true); char[] expected = new char[] {'R', 'C', 'F'}; testRCFileHeader(expected, conf); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index e885c634c054..c986d10ab4cf 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ 
b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -1728,8 +1728,8 @@ public void testSplitGenerator() throws Exception { new MockBlock("host0", "host3-2", "host3-3"), new MockBlock("host4-1", "host4-2", "host4-3"), new MockBlock("host5-1", "host5-2", "host5-3"))); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 300); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 200); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 300); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 200); OrcInputFormat.Context context = new OrcInputFormat.Context(conf); OrcInputFormat.SplitGenerator splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs, @@ -1752,8 +1752,8 @@ public void testSplitGenerator() throws Exception { assertEquals(1800, result.getStart()); assertEquals(200, result.getLength()); // test min = 0, max = 0 generates each stripe - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 0); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 0); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 0); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 0); context = new OrcInputFormat.Context(conf); splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs, fs.getFileStatus(new Path("/a/file")), null, null, true, @@ -1777,8 +1777,8 @@ public void testProjectedColumnSize() throws Exception { new MockBlock("host0", "host3-2", "host3-3"), new MockBlock("host4-1", "host4-2", "host4-3"), new MockBlock("host5-1", "host5-2", "host5-3"))); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 300); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 200); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 300); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 200); conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); OrcInputFormat.Context context = new OrcInputFormat.Context(conf); @@ -1802,8 +1802,8 @@ public void testProjectedColumnSize() throws Exception { assertEquals(43792, result.getProjectedColumnsUncompressedSize()); // test min = 0, max = 0 generates each stripe - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 0); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 0); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 0); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 0); context = new OrcInputFormat.Context(conf); splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs, fs.getFileStatus(new Path("/a/file")), null, null, true, @@ -1822,8 +1822,8 @@ public void testProjectedColumnSize() throws Exception { } // single split - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 1000); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 100000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 1000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 100000); context = new OrcInputFormat.Context(conf); splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs, fs.getFileStatus(new Path("/a/file")), null, null, true, @@ -3785,13 +3785,13 @@ public void testRowNumberUniquenessInDifferentSplits() throws Exception { // Save the conf variable values so that they 
can be restored later. long oldDefaultStripeSize = conf.getLong(OrcConf.STRIPE_SIZE.getHiveConfName(), -1L); - long oldMaxSplitSize = conf.getLong(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname, -1L); + long oldMaxSplitSize = conf.getLong(HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE.varname, -1L); // Set the conf variable values for this test. long newStripeSize = 10000L; // 10000 bytes per stripe long newMaxSplitSize = 100L; // 1024 bytes per split conf.setLong(OrcConf.STRIPE_SIZE.getHiveConfName(), newStripeSize); - conf.setLong(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname, newMaxSplitSize); + conf.setLong(HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE.varname, newMaxSplitSize); AbstractSerDe serde = new OrcSerde(); HiveOutputFormat outFormat = new OrcOutputFormat(); @@ -3838,10 +3838,10 @@ public void testRowNumberUniquenessInDifferentSplits() throws Exception { conf.unset(OrcConf.STRIPE_SIZE.getHiveConfName()); } if (oldMaxSplitSize != -1L) { - conf.setLong(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname, oldMaxSplitSize); + conf.setLong(HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE.varname, oldMaxSplitSize); } else { // this means that nothing was set for default stripe size previously, so we should unset it. - conf.unset(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname); + conf.unset(HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE.varname); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java index f046191ae47e..0b6d57636d38 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java @@ -127,8 +127,8 @@ public void testSplitEliminationSmallMaxSplit() throws Exception { 100000, CompressionKind.NONE, 10000, 10000); writeData(writer); writer.close(); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1000); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 5000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 1000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 5000); InputFormat in = new OrcInputFormat(); FileInputFormat.setInputPaths(conf, testFilePath.toString()); @@ -197,8 +197,8 @@ public void testSplitEliminationLargeMaxSplit() throws Exception { 100000, CompressionKind.NONE, 10000, 10000); writeData(writer); writer.close(); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1000); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 150000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 1000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 150000); InputFormat in = new OrcInputFormat(); FileInputFormat.setInputPaths(conf, testFilePath.toString()); @@ -278,8 +278,8 @@ public void testSplitEliminationComplexExpr() throws Exception { 100000, CompressionKind.NONE, 10000, 10000); writeData(writer); writer.close(); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1000); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 150000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 1000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 150000); InputFormat in = new OrcInputFormat(); FileInputFormat.setInputPaths(conf, testFilePath.toString()); @@ -696,10 +696,10 @@ private static String toString(FileSplit fs) { private void setupExternalCacheConfig(boolean isPpd, String paths) { 
FileInputFormat.setInputPaths(conf, paths); conf.set(ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname, "ETL"); - conf.setLong(HiveConf.ConfVars.MAPREDMINSPLITSIZE.varname, 1000); - conf.setLong(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname, 5000); + conf.setLong(HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE.varname, 1000); + conf.setLong(HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE.varname, 5000); conf.setBoolean(ConfVars.HIVE_ORC_MS_FOOTER_CACHE_PPD.varname, isPpd); - conf.setBoolean(ConfVars.HIVEOPTINDEXFILTER.varname, isPpd); + conf.setBoolean(ConfVars.HIVE_OPT_INDEX_FILTER.varname, isPpd); } private ObjectInspector createIO() { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java index b192da437ea7..2c80d3ee19ee 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java @@ -383,13 +383,13 @@ public void testDeleteEventFilteringOn2() throws Exception { @Test public void testDeleteEventFilteringOnWithoutIdx2() throws Exception { HiveConf.setBoolVar(conf, HiveConf.ConfVars.FILTER_DELETE_EVENTS, true); - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVETESTMODEACIDKEYIDXSKIP, true); + HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TEST_MODE_ACID_KEY_IDX_SKIP, true); testDeleteEventFiltering2(); } @Test public void testDeleteEventFilteringOnWithoutIdx3() throws Exception { HiveConf.setBoolVar(conf, HiveConf.ConfVars.FILTER_DELETE_EVENTS, true); - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVETESTMODEACIDKEYIDXSKIP, true); + HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TEST_MODE_ACID_KEY_IDX_SKIP, true); conf.set("orc.stripe.size", "1000"); testDeleteEventFiltering(); } @@ -398,7 +398,7 @@ private void testDeleteEventFiltering2() throws Exception { boolean filterOn = HiveConf.getBoolVar(conf, HiveConf.ConfVars.FILTER_DELETE_EVENTS); boolean skipKeyIdx = - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVETESTMODEACIDKEYIDXSKIP); + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_TEST_MODE_ACID_KEY_IDX_SKIP); int bucket = 1; AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) .filesystem(fs) diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java index e6806bdf7332..17328b1281e3 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java @@ -50,7 +50,7 @@ public static void setupDb() throws Exception { .toLowerCase(); rule = getDatabaseRule(metastoreType).setVerbose(false); - conf.setVar(HiveConf.ConfVars.METASTOREDBTYPE, metastoreType.toUpperCase()); + conf.setVar(HiveConf.ConfVars.METASTORE_DB_TYPE, metastoreType.toUpperCase()); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY, rule.getJdbcUrl()); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER, rule.getJdbcDriver()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java index dc20b552c1af..f7a02cc76746 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java @@ -502,7 +502,7 @@ public void testMetastoreTablesCleanup() throws Exception { 
Assert.assertEquals(5, count); // Fail some inserts, so that we have records in TXN_COMPONENTS - conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); driver.run("insert into temp.T10 values (9, 9)"); driver.run("insert into temp.T11 values (10, 10)"); driver.run("insert into temp.T12p partition (ds='today', hour='1') values (11, 11)"); @@ -510,7 +510,7 @@ public void testMetastoreTablesCleanup() throws Exception { count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"TXN_COMPONENTS\" " + "where \"TC_DATABASE\"='temp' and \"TC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(4, count); - conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // Drop a table/partition; corresponding records in TXN_COMPONENTS and COMPLETED_TXN_COMPONENTS should disappear count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"TXN_COMPONENTS\" " + @@ -580,7 +580,7 @@ public void testMetastoreTablesCleanup() throws Exception { // Tables need at least 2 delta files to compact, and minor compaction was just run, so insert driver.run("insert into temp.T11 values (14, 14)"); driver.run("insert into temp.T12p partition (ds='tomorrow', hour='2') values (15, 15)"); - conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); + conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); driver.run("alter table temp.T11 compact 'major'"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); @@ -606,7 +606,7 @@ public void testMetastoreTablesCleanup() throws Exception { count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_COMPACTIONS\" " + "where \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t12p' and \"CC_STATE\"='f' and \"CC_TYPE\"='a'"); Assert.assertEquals(1, count); - conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, false); // Put 2 records into COMPACTION_QUEUE and do nothing driver.run("alter table temp.T11 compact 'major'"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index 33171ebb0cb2..f823b324b0c5 100755 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -943,8 +943,8 @@ public void testHiveRefreshOnConfChange() throws Throwable{ prevHiveObj.getDatabaseCurrent(); //change value of a metavar config param in new hive conf newHconf = new HiveConf(hiveConf); - newHconf.setIntVar(ConfVars.METASTORETHRIFTCONNECTIONRETRIES, - newHconf.getIntVar(ConfVars.METASTORETHRIFTCONNECTIONRETRIES) + 1); + newHconf.setIntVar(ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, + newHconf.getIntVar(ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES) + 1); newHiveObj = Hive.get(newHconf); assertTrue(prevHiveObj != newHiveObj); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java index 1c26899ce21e..09d9dc2f5327 100644 --- 
a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java @@ -260,7 +260,7 @@ private void checkPartitionNames(List expected, short numParts, String o @Test public void testListPartitionNames() throws Exception { Table t = createTable4PartColsParts(getClient()).table; - String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULT_PARTITION_NAME); List> testValues = Lists.newArrayList( Lists.newArrayList("1999", defaultPartitionName, "02"), Lists.newArrayList(defaultPartitionName, "02", "10"), diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestNullScanTaskDispatcher.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestNullScanTaskDispatcher.java index c9fc2a54edd6..07eabd171f55 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestNullScanTaskDispatcher.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestNullScanTaskDispatcher.java @@ -85,7 +85,7 @@ public class TestNullScanTaskDispatcher { public void setup() { hiveConf = new HiveConf(); hiveConf.set("fs.mock.impl", MockFileSystem.class.getName()); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES, true); sessionState = SessionState.start(hiveConf); parseContext = spy(new ParseContext()); context = new Context(hiveConf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestDMLSemanticAnalyzer.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestDMLSemanticAnalyzer.java index ac5795295158..211c80aa662b 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestDMLSemanticAnalyzer.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestDMLSemanticAnalyzer.java @@ -232,7 +232,7 @@ public void setup() throws Exception { conf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); - conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + conf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); conf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true); conf.set(ValidTxnList.VALID_TXNS_KEY, diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java index 780fb2a58e22..56c30c103bb2 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.junit.Test; -import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -70,19 +69,19 @@ public void testResolvingDriverAlias() throws Exception { ctx.setAliasToKnownSize(aliasToKnownSize); HiveConf conf = new HiveConf(); - conf.setLongVar(HiveConf.ConfVars.HIVESMALLTABLESFILESIZE, 4096); + conf.setLongVar(HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE, 4096); // alias3 only can be selected Task resolved = resolver.resolveMapJoinTask(ctx, conf); Assert.assertEquals("alias3", resolved.getId()); - 
conf.setLongVar(HiveConf.ConfVars.HIVESMALLTABLESFILESIZE, 65536); + conf.setLongVar(HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE, 65536); // alias1, alias2, alias3 all can be selected but overriden by biggest one (alias3) resolved = resolver.resolveMapJoinTask(ctx, conf); Assert.assertEquals("alias3", resolved.getId()); - conf.setLongVar(HiveConf.ConfVars.HIVESMALLTABLESFILESIZE, 2048); + conf.setLongVar(HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE, 2048); // not selected resolved = resolver.resolveMapJoinTask(ctx, conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java index 3a50356cf8f0..66f508e215ac 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java @@ -148,7 +148,7 @@ public void testInConversion() throws ParseException, CommandProcessorException "explain select sum(id_uv) from tu where u in (1,2) group by u"; HiveConf conf = env_setup.getTestCtx().hiveConf; - conf.setIntVar(ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN, 10); + conf.setIntVar(ConfVars.HIVE_POINT_LOOKUP_OPTIMIZER_MIN, 10); IDriver driver = createDriver(); PlanMapper pm = getMapperForQuery(driver, query); @@ -229,7 +229,7 @@ private static IDriver createDriver() { "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); // conf.setVar(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, CheckInputReadEntityDirect.class.getName()); HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - HiveConf.setVar(conf, HiveConf.ConfVars.POSTEXECHOOKS, OperatorStatsReaderHook.class.getName()); + HiveConf.setVar(conf, HiveConf.ConfVars.POST_EXEC_HOOKS, OperatorStatsReaderHook.class.getName()); SessionState.start(conf); IDriver driver = DriverFactory.newDriver(conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java index e5fcc3a0d762..dbe7e967f374 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java @@ -190,7 +190,7 @@ private boolean compareOperators(Operator opL, Operator opR) { private static IDriver createDriver() { HiveConf conf = env_setup.getTestCtx().hiveConf; - conf.setBoolVar(ConfVars.HIVEOPTPPD, false); + conf.setBoolVar(ConfVars.HIVE_OPT_PPD, false); conf.setBoolVar(ConfVars.HIVE_QUERY_REEXECUTION_ENABLED, true); conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, false); conf.setBoolVar(ConfVars.HIVE_QUERY_REEXECUTION_ALWAYS_COLLECT_OPERATOR_STATS, true); @@ -202,7 +202,7 @@ private static IDriver createDriver() { conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - HiveConf.setVar(conf, HiveConf.ConfVars.POSTEXECHOOKS, OperatorStatsReaderHook.class.getName()); + HiveConf.setVar(conf, HiveConf.ConfVars.POST_EXEC_HOOKS, OperatorStatsReaderHook.class.getName()); SessionState.start(conf); IDriver driver = DriverFactory.newDriver(conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java index b67385737ef1..43e6a820f020 100644 --- 
a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java @@ -303,7 +303,7 @@ private static IDriver createDriver(String strategies) { conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - HiveConf.setVar(conf, HiveConf.ConfVars.POSTEXECHOOKS, OperatorStatsReaderHook.class.getName()); + HiveConf.setVar(conf, HiveConf.ConfVars.POST_EXEC_HOOKS, OperatorStatsReaderHook.class.getName()); SessionState.start(conf); IDriver driver = DriverFactory.newDriver(conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java b/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java index ddbbef0b0134..8a993686a690 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java @@ -65,13 +65,13 @@ public void setupTest() throws Exception { public void testHiddenConfig() throws Exception { runSetProcessor(""); String output = baos.toString(); - Assert.assertFalse(output.contains(HiveConf.ConfVars.METASTOREPWD.varname + "=")); + Assert.assertFalse(output.contains(HiveConf.ConfVars.METASTORE_PWD.varname + "=")); Assert.assertFalse(output.contains(HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + "=")); } @Test public void testHiddenConfigSetVarName() throws CommandProcessorException { - runSetProcessor(HiveConf.ConfVars.METASTOREPWD.varname); + runSetProcessor(HiveConf.ConfVars.METASTORE_PWD.varname); String output = baos.toString(); Assert.assertTrue(output.contains("hidden")); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java index 410d9b7d9184..0f762da0abf0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java @@ -77,8 +77,8 @@ public static void beforeTest() throws Exception { conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, true); conf.setVar(ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName()); - conf.setVar(ConfVars.HIVEMAPREDMODE, "nonstrict"); - conf.setVar(ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + conf.setVar(ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + conf.setVar(ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); TestTxnDbUtil.prepDb(conf); SessionState.start(conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java index 3a59ad54c4a0..c91622611954 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java @@ -53,7 +53,7 @@ processedConf, new HadoopDefaultAuthenticator(), getCLISessionCtx() // check that hook to disable transforms has not been added 
assertFalse("Check for transform query disabling hook", - processedConf.getVar(ConfVars.PREEXECHOOKS).contains(DisallowTransformHook.class.getName())); + processedConf.getVar(ConfVars.PRE_EXEC_HOOKS).contains(DisallowTransformHook.class.getName())); // verify that some dummy param can be set processedConf.verifyAndSet("dummy.param", "dummy.val"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java index b087d3beab26..1fc3663e75bd 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java @@ -62,7 +62,7 @@ public void testConfigProcessing() throws HiveAuthzPluginException, SecurityExce // check that hook to disable transforms has been added assertTrue("Check for transform query disabling hook", - processedConf.getVar(ConfVars.PREEXECHOOKS).contains(DisallowTransformHook.class.getName())); + processedConf.getVar(ConfVars.PRE_EXEC_HOOKS).contains(DisallowTransformHook.class.getName())); List settableParams = getSettableParams(); verifyParamSettability(settableParams, processedConf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java b/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java index 4c374e8d418a..9f93f096d382 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java @@ -24,7 +24,6 @@ import java.io.File; import java.io.IOException; -import java.lang.reflect.Constructor; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.util.Arrays; @@ -205,7 +204,7 @@ private void generateRefreshJarFiles(String version) throws IOException, Interru @Test public void testReloadAuxJars2() { HiveConf conf = new HiveConf(); - HiveConf.setVar(conf, ConfVars.HIVERELOADABLEJARS, hiveReloadPath); + HiveConf.setVar(conf, ConfVars.HIVE_RELOADABLE_JARS, hiveReloadPath); SessionState ss = new SessionState(conf); SessionState.start(ss); @@ -275,7 +274,7 @@ public void testReflectionCleanup() throws Exception { @Test public void testReloadExistingAuxJars2() { HiveConf conf = new HiveConf(); - HiveConf.setVar(conf, ConfVars.HIVERELOADABLEJARS, hiveReloadPath); + HiveConf.setVar(conf, ConfVars.HIVE_RELOADABLE_JARS, hiveReloadPath); SessionState ss = new SessionState(conf); SessionState.start(ss); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java index add1b0b82bca..e2bfa9763b7f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java @@ -74,10 +74,10 @@ String getTestDataDir() { @Before public void setUp() throws Exception { this.hiveConf = new HiveConf(TestStatsUpdaterThread.class); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getTestDataDir()); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + 
hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, getTestDataDir()); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); // hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/util/TestHiveStrictManagedMigration.java b/ql/src/test/org/apache/hadoop/hive/ql/util/TestHiveStrictManagedMigration.java index 386a90fd9445..a10a6aa30f7a 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/util/TestHiveStrictManagedMigration.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/util/TestHiveStrictManagedMigration.java @@ -84,7 +84,7 @@ public void testUpgrade() throws Exception { "--oldWarehouseRoot", oldWarehouse}; HiveConf newConf = new HiveConf(hiveConf); File newWarehouseDir = new File(getTestDataDir(), "newWarehouse"); - newConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, newWarehouseDir.getAbsolutePath()); + newConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, newWarehouseDir.getAbsolutePath()); newConf.set("strict.managed.tables.migration.owner", System.getProperty("user.name")); runMigrationTool(newConf, args); @@ -124,7 +124,7 @@ public void testExternalMove() throws Exception { HiveConf newConf = new HiveConf(hiveConf); File newManagedWarehouseDir = new File(getTestDataDir(), "newManaged"); File newExtWarehouseDir = new File(getTestDataDir(), "newExternal"); - newConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, newManagedWarehouseDir.getAbsolutePath()); + newConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, newManagedWarehouseDir.getAbsolutePath()); newConf.set(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname, newExtWarehouseDir.getAbsolutePath()); runMigrationTool(newConf, args); Assert.assertTrue(newExtWarehouseDir.exists()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/util/TestUpgradeTool.java b/ql/src/test/org/apache/hadoop/hive/ql/util/TestUpgradeTool.java index 7974da839123..4f24454056b7 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/util/TestUpgradeTool.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/util/TestUpgradeTool.java @@ -47,7 +47,7 @@ protected String getTestDataDir() { public void testPostUpgrade() throws Exception { int[][] data = {{1, 2}, {3, 4}, {5, 6}}; int[][] dataPart = {{1, 2, 10}, {3, 4, 11}, {5, 6, 12}}; - hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "dynamic"); + hiveConf.setVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "dynamic"); runStatementOnDriver("drop table if exists TAcid"); runStatementOnDriver("drop table if exists TAcidPart"); runStatementOnDriver("drop table if exists TFlat"); diff --git a/ql/src/test/results/clientnegative/groupby_cube_multi_gby.q.out b/ql/src/test/results/clientnegative/groupby_cube_multi_gby.q.out index 88b87bb25846..e2491900dd38 100644 --- a/ql/src/test/results/clientnegative/groupby_cube_multi_gby.q.out +++ b/ql/src/test/results/clientnegative/groupby_cube_multi_gby.q.out @@ -14,4 +14,4 @@ POSTHOOK: query: create table t2 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t2 -FAILED: SemanticException [Error 10315]: Grouping sets aggregations (with rollups or cubes) are not allowed when HIVEMULTIGROUPBYSINGLEREDUCER is turned on. 
Set hive.multigroupby.singlereducer=false if you want to use grouping sets +FAILED: SemanticException [Error 10315]: Grouping sets aggregations (with rollups or cubes) are not allowed when HIVE_MULTI_GROUPBY_SINGLE_REDUCER is turned on. Set hive.multigroupby.singlereducer=false if you want to use grouping sets diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/DefaultFetchFormatter.java b/serde/src/java/org/apache/hadoop/hive/serde2/DefaultFetchFormatter.java index 1512a0eea0e6..bd0e9f5ed27a 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/DefaultFetchFormatter.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/DefaultFetchFormatter.java @@ -43,7 +43,7 @@ public void initialize(Configuration hconf, Properties props) throws SerDeExcept } private AbstractSerDe initializeSerde(Configuration conf, Properties props) throws SerDeException { - String serdeName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE); + String serdeName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_FETCH_OUTPUT_SERDE); Class serdeClass; try { serdeClass = diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java index c14a041ba013..e4151a617cdd 100644 --- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java +++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java @@ -139,7 +139,7 @@ public HiveSessionImpl(SessionHandle sessionHandle, TProtocolVersion protocol, this.operationLock = serverConf.getBoolVar( ConfVars.HIVE_SERVER2_PARALLEL_OPS_IN_SESSION) ? null : new Semaphore(1); // Set an explicit session name to control the download directory name - sessionConf.set(ConfVars.HIVESESSIONID.varname, + sessionConf.set(ConfVars.HIVE_SESSION_ID.varname, this.sessionHandle.getHandleIdentifier().toString()); // Use thrift transportable formatter sessionConf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, ThriftFormatter.class.getName()); @@ -468,7 +468,7 @@ public String getPassword() { @Override public HiveConf getHiveConf() { - sessionConf.setVar(HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE, FETCH_WORK_SERDE_CLASS); + sessionConf.setVar(HiveConf.ConfVars.HIVE_FETCH_OUTPUT_SERDE, FETCH_WORK_SERDE_CLASS); return sessionConf; } diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java index e8eaab550aef..e1650e86f6be 100644 --- a/service/src/java/org/apache/hive/service/server/HiveServer2.java +++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java @@ -52,7 +52,6 @@ import org.apache.curator.framework.api.CuratorEventType; import org.apache.curator.framework.recipes.leader.LeaderLatch; import org.apache.curator.framework.recipes.leader.LeaderLatchListener; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.JvmPauseMonitor; import org.apache.hadoop.hive.common.LogUtils; import org.apache.hadoop.hive.common.LogUtils.LogInitializationException; @@ -106,7 +105,6 @@ import org.apache.hive.service.ServiceException; import org.apache.hive.service.auth.AuthType; import org.apache.hive.service.auth.saml.HiveSaml2Client; -import org.apache.hive.service.auth.saml.HiveSamlUtils; import org.apache.hive.service.cli.CLIService; import org.apache.hive.service.cli.HiveSQLException; import org.apache.hive.service.cli.session.HiveSession; @@ -1044,7 +1042,7 @@ public static void scheduleClearDanglingScratchDir(HiveConf hiveConf, int initia 
.daemon(true) .build()); executor.scheduleAtFixedRate(new ClearDanglingScratchDir(false, false, false, - HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCHDIR), hiveConf), initialWaitInSec, + HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCH_DIR), hiveConf), initialWaitInSec, HiveConf.getTimeVar(hiveConf, ConfVars.HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL, TimeUnit.SECONDS), TimeUnit.SECONDS); } diff --git a/service/src/test/org/apache/hive/service/cli/session/TestSessionCleanup.java b/service/src/test/org/apache/hive/service/cli/session/TestSessionCleanup.java index c19d97abe8f4..e1fbdfcdca3a 100644 --- a/service/src/test/org/apache/hive/service/cli/session/TestSessionCleanup.java +++ b/service/src/test/org/apache/hive/service/cli/session/TestSessionCleanup.java @@ -105,7 +105,7 @@ public void testTempSessionFileCleanup() throws Exception { private String[] getPipeoutFiles(HiveConf hiveConf) { File localScratchDir = new File( - hiveConf.getVar(HiveConf.ConfVars.LOCALSCRATCHDIR)); + hiveConf.getVar(HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); String[] pipeoutFiles = localScratchDir.list(new FilenameFilter() { @Override public boolean accept(File dir, String name) { diff --git a/service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java b/service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java index a3356658c12e..7f31dce576fb 100644 --- a/service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java +++ b/service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java @@ -26,7 +26,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import com.fasterxml.jackson.databind.JsonNode; import org.apache.hadoop.hive.common.metrics.MetricsTestUtils; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; @@ -44,7 +43,6 @@ import org.apache.hive.service.cli.operation.MetadataOperation; import org.apache.hive.service.cli.operation.OperationManager; import org.apache.hive.service.rpc.thrift.TProtocolVersion; -import org.apache.hive.service.server.HiveServer2; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -76,7 +74,7 @@ public void setup() throws Exception { conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name()); - conf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, false); //NOTES: If we enable operation log, SessionManager will delete operation logs directory on exit, //it maybe impact TestSessionCleanup, because they use the same location ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION, // when we run testing in parallel on local machine with -DforkCount=x, it happen. 
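For reference, the test setups in the hunks above reach the renamed constants through the two access styles HiveConf already offers: the typed helpers (setVar/setBoolVar/getVar/getBoolVar) that take a ConfVars value, and the raw key string exposed by ConfVars.varname for the generic Configuration set/get methods. The sketch below is illustrative only and not part of this patch; the particular variables were picked simply because they appear in the surrounding hunks.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class ConfVarsAccessSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();

    // Typed access through the ConfVars enum, as most of the test setup methods above do.
    conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
    conf.setVar(ConfVars.POST_EXEC_HOOKS, "");

    // String-keyed access via varname, for code that only holds a plain Configuration.
    conf.set(ConfVars.METASTORE_WAREHOUSE.varname, "/tmp/test-warehouse");

    // Reads mirror the writes; both the static and instance forms appear in this patch.
    boolean concurrency = HiveConf.getBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY);
    String hooks = conf.getVar(ConfVars.POST_EXEC_HOOKS);
    System.out.println(concurrency + " " + hooks + " " + conf.get(ConfVars.METASTORE_WAREHOUSE.varname));
  }
}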
diff --git a/service/src/test/org/apache/hive/service/server/TestHS2HttpServer.java b/service/src/test/org/apache/hive/service/server/TestHS2HttpServer.java index 6c50e8170901..a1e30d3bb5e9 100644 --- a/service/src/test/org/apache/hive/service/server/TestHS2HttpServer.java +++ b/service/src/test/org/apache/hive/service/server/TestHS2HttpServer.java @@ -76,7 +76,7 @@ public static void beforeTests() throws Exception { Integer.valueOf(ConfVars.HIVE_SERVER2_WEBUI_PORT.getDefaultValue())); apiBaseURL = "http://localhost:" + webUIPort + "/api/v1"; hiveConf = new HiveConf(); - hiveConf.set(ConfVars.METASTOREPWD.varname, metastorePasswd); + hiveConf.set(ConfVars.METASTORE_PWD.varname, metastorePasswd); hiveConf.set(ConfVars.HIVE_SERVER2_WEBUI_PORT.varname, webUIPort.toString()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); @@ -298,7 +298,7 @@ public void testConfStrippedFromWebUI() throws Exception { if (line.contains(metastorePasswd)) { pwdValFound = line; } - if (line.contains(ConfVars.METASTOREPWD.varname)) { + if (line.contains(ConfVars.METASTORE_PWD.varname)) { pwdKeyFound = line; } } diff --git a/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPam.java b/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPam.java index 04f66b4e5d73..92706af7033a 100644 --- a/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPam.java +++ b/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPam.java @@ -35,19 +35,8 @@ import org.junit.Test; import javax.security.sasl.AuthenticationException; -import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.NoSuchAlgorithmException; -import java.security.cert.Certificate; -import java.security.cert.CertificateException; -import java.security.cert.CertificateFactory; import java.util.HashMap; import java.util.Map; @@ -68,7 +57,7 @@ public static void beforeTests() throws Exception { MetaStoreTestUtils.findFreePortExcepting(Integer.valueOf(ConfVars.HIVE_SERVER2_WEBUI_PORT.getDefaultValue())); hiveConf = new HiveConf(); hiveConf.setBoolVar(ConfVars.HIVE_IN_TEST, true); - hiveConf.set(ConfVars.METASTOREPWD.varname, metastorePasswd); + hiveConf.set(ConfVars.METASTORE_PWD.varname, metastorePasswd); hiveConf.set(ConfVars.HIVE_SERVER2_WEBUI_PORT.varname, webUIPort.toString()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPamConfiguration.java b/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPamConfiguration.java index 3e2ad22bc15a..cf1fc236ce86 100644 --- a/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPamConfiguration.java +++ b/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPamConfiguration.java @@ -66,7 +66,7 @@ public static void beforeTests() throws Exception { hiveConf = new HiveConf(); hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_WEBUI_USE_PAM, true); hiveConf.setBoolVar(ConfVars.HIVE_IN_TEST, false); - hiveConf.set(ConfVars.METASTOREPWD.varname, metastorePasswd); + hiveConf.set(ConfVars.METASTORE_PWD.varname, 
metastorePasswd); hiveConf.set(ConfVars.HIVE_SERVER2_WEBUI_PORT.varname, webUIPort.toString()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index a12b989c7307..53889e2b6f07 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -141,7 +141,7 @@ * * An implementation of RawStore that verifies the DummyJdoConnectionUrlHook has already been * applied when this class's setConf method is called, by checking that the value of the - * METASTORECONNECTURLKEY ConfVar has been updated. + * METASTORE_CONNECT_URL_KEY ConfVar has been updated. * * All non-void methods return default values. */ diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index a6e510e5c4db..380863b716c6 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -1181,10 +1181,10 @@ public void testConcurrentAddNotifications() throws ExecutionException, Interrup */ // conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); -// conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, +// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY, // "jdbc:mysql://localhost:3306/metastore_db"); // conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, ""); -// conf.setVar(HiveConf.ConfVars.METASTOREPWD, ""); +// conf.setVar(HiveConf.ConfVars.METASTORE_PWD, ""); /* we have to add this one manually as for tests the db is initialized via the metastoreDiretSQL diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java index 4c4905deb0aa..658677751c7d 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java @@ -85,7 +85,7 @@ public void testNoRetryInit() throws MetaException { /* * If the init method in HMSHandler throws exception all the times it should be retried until - * HiveConf.ConfVars.HMSHANDLERATTEMPTS is reached before giving up + * HiveConf.ConfVars.HMS_HANDLER_ATTEMPTS is reached before giving up */ @Test public void testRetriesLimit() throws MetaException { diff --git a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java index 6d78e70cffbb..73accd90b034 100644 --- a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java +++ 
b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java @@ -147,7 +147,7 @@ public void init(StreamingConnection conn, long minWriteId, long maxWriteId, this.curBatchMaxWriteId = maxWriteId; this.statementId = statementId; this.conf = conn.getHiveConf(); - this.defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + this.defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); this.table = conn.getTable(); String location = table.getSd().getLocation(); try { diff --git a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java index a61beb55b8fd..41e356b93da3 100644 --- a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java +++ b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java @@ -827,7 +827,7 @@ private void overrideConfSettings(HiveConf conf) { setHiveConf(conf, HiveConf.ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName()); setHiveConf(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true); setHiveConf(conf, MetastoreConf.ConfVars.EXECUTE_SET_UGI.getHiveName()); - setHiveConf(conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + setHiveConf(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); if (streamingOptimizations) { setHiveConf(conf, HiveConf.ConfVars.HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED, true); } diff --git a/streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java b/streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java index c548ea7388a6..c8c7a8e26db0 100644 --- a/streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java +++ b/streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java @@ -513,7 +513,7 @@ public void testDPTwoLevelNonStringPartitionColumns() throws Exception { assertEquals("7\tfoo\t" + defaultPartitionName + "\t" + defaultPartitionName, res.get(6)); assertEquals("8\tbar\t" + defaultPartitionName + "\t12", res.get(7)); - defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); res = queryTable(driver, "show partitions " + (dbName + "." + tblName)); assertEquals(5, res.size()); assertTrue(res.contains("year=2018/month=2")); @@ -573,7 +573,7 @@ public void testWriteBeforeBegin() throws Exception { assertTrue(exception.getMessage().equals("Transaction state is not OPEN. Missing beginTransaction?")); connection.close(); - String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); List res = queryTable(driver, "select * from " + (dbName + "." + tblName) + " order by id"); assertEquals(2, res.size()); assertEquals("1\tfoo\tAsia\t" + defaultPartitionName, res.get(0)); @@ -707,7 +707,7 @@ public void testWriteAfterClose() throws Exception { assertNotNull(exception); assertTrue(exception.getMessage().endsWith("Streaming connection is closed already.")); - String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); List res = queryTable(driver, "select * from " + (dbName + "." 
+ tblName) + " order by id"); assertEquals(2, res.size()); assertEquals("1\tfoo\tAsia\t" + defaultPartitionName, res.get(0)); @@ -758,7 +758,7 @@ public void testWriteAfterAbort() throws Exception { } assertNotNull(exception); assertTrue(exception.getMessage().equals("Streaming connection is closed already.")); - String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); List res = queryTable(driver, "select * from " + (dbName + "." + tblName) + " order by id"); assertEquals(3, res.size()); assertEquals("1\tfoo\tAsia\t" + defaultPartitionName, res.get(0));
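The streaming hunks above read the default partition name and force dynamic partitioning to nonstrict under the new constant names. A minimal sketch of those two lookups against a fresh HiveConf (illustrative only; the value printed for the default partition name depends on the local configuration, typically __HIVE_DEFAULT_PARTITION__):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class StreamingConfSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();

    // Name substituted for missing dynamic-partition values, read the same way AbstractRecordWriter.init() does above.
    String defaultPartitionName = conf.getVar(ConfVars.DEFAULT_PARTITION_NAME);

    // Streaming ingest relies on nonstrict dynamic partitioning, as HiveStreamingConnection.overrideConfSettings() sets above.
    conf.setVar(ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict");

    System.out.println(defaultPartitionName + " / " + conf.getVar(ConfVars.DYNAMIC_PARTITIONING_MODE));
  }
}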