diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
index 38c91fcb37c3..25640ed294d5 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
@@ -275,7 +275,7 @@ public int runCopier(String outputDir) throws Exception {
}
job.getConfiguration().setBoolean("mapreduce.map.speculative", false);
job.getConfiguration().setBoolean("mapreduce.reduce.speculative", false);
- TableMapReduceUtil.initTableReducerJob(COMMON_TABLE_NAME, null, job, null, null, null, null);
+ TableMapReduceUtil.initTableReducerJob(COMMON_TABLE_NAME, null, job);
TableMapReduceUtil.addDependencyJars(job);
TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
AbstractHBaseTool.class);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index 273271b1867b..8564c105331e 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -181,8 +181,7 @@ public Job createSubmittableJob(String[] args) throws IOException {
}
} else {
initCopyTableMapperReducerJob(job, scan);
- TableMapReduceUtil.initTableReducerJob(dstTableName, null, job, null, peerAddress, null,
- null);
+ TableMapReduceUtil.initTableReducerJob(dstTableName, null, job, null, peerAddress);
}
return job;
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index b8b0dceea381..c1cf132d0302 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -167,8 +167,7 @@ public Job createSubmittableJob(String[] args) throws IOException {
} else {
// No reducers. Just write straight to table. Call initTableReducerJob
// because it sets up the TableOutputFormat.
- TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, targetZkCluster,
- null, null);
+ TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, targetZkCluster);
// would be nice to add an option for bulk load instead
}
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index f189767a7c76..a23393ff804c 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -549,7 +549,7 @@ public static void initTableReducerJob(String table, Class<? extends TableReduce
*/
public static void initTableReducerJob(String table, Class<? extends TableReducer> reducer,
Job job, Class partitioner) throws IOException {
- initTableReducerJob(table, reducer, job, partitioner, null, null, null);
+ initTableReducerJob(table, reducer, job, partitioner, null);
}
/**
@@ -569,15 +569,11 @@ public static void initTableReducerJob(String table, Class<? extends TableReduce
* <hbase.zookeeper.quorum>:<
* hbase.zookeeper.client.port>:<zookeeper.znode.parent>
* such as server,server2,server3:2181:/hbase.
- * @param serverClass redefined hbase.regionserver.class
- * @param serverImpl redefined hbase.regionserver.impl
* @throws IOException When determining the region count fails.
*/
public static void initTableReducerJob(String table, Class<? extends TableReducer> reducer,
- Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl)
- throws IOException {
- initTableReducerJob(table, reducer, job, partitioner, quorumAddress, serverClass, serverImpl,
- true);
+ Job job, Class partitioner, String quorumAddress) throws IOException {
+ initTableReducerJob(table, reducer, job, partitioner, quorumAddress, true);
}
/**
@@ -597,16 +593,13 @@ public static void initTableReducerJob(String table, Class<? extends TableReduce
* <hbase.zookeeper.quorum>:<
* hbase.zookeeper.client.port>:<zookeeper.znode.parent>
* such as server,server2,server3:2181:/hbase.
- * @param serverClass redefined hbase.regionserver.class
- * @param serverImpl redefined hbase.regionserver.impl
* @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
* the distributed cache (tmpjars).
* @throws IOException When determining the region count fails.
*/
public static void initTableReducerJob(String table, Class<? extends TableReducer> reducer,
- Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl,
- boolean addDependencyJars) throws IOException {
-
+ Job job, Class partitioner, String quorumAddress, boolean addDependencyJars)
+ throws IOException {
Configuration conf = job.getConfiguration();
HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
job.setOutputFormatClass(TableOutputFormat.class);
@@ -620,10 +613,6 @@ public static void initTableReducerJob(String table, Class<? extends TableReduce
ZKConfig.validateClusterKey(quorumAddress);
conf.set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress);
}
- if (serverClass != null && serverImpl != null) {
- conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass);
- conf.set(TableOutputFormat.REGION_SERVER_IMPL, serverImpl);
- }
job.setOutputKeyClass(ImmutableBytesWritable.class);
job.setOutputValueClass(Writable.class);
if (partitioner == HRegionPartitioner.class) {
@@ -643,6 +632,72 @@ public static void initTableReducerJob(String table, Class<? extends TableReduce
initCredentials(job);
}
+ /**
+ * Use this before submitting a TableReduce job. It will appropriately set up the JobConf.
+ * @param table The output table.
+ * @param reducer The reducer class to use.
+ * @param job The current job to adjust. Make sure the passed job is carrying all
+ * necessary HBase configuration.
+ * @param partitioner Partitioner to use. Pass null to use default partitioner.
+ * @param quorumAddress Distant cluster to write to; default is null for output to the cluster
+ * that is designated in hbase-site.xml. Set this String to the
+ * zookeeper ensemble of an alternate remote cluster when you would have the
+ * reduce write a cluster that is other than the default; e.g. copying tables
+ * between clusters, the source would be designated by
+ * hbase-site.xml and this param would have the ensemble address
+ * of the remote cluster. The format to pass is particular. Pass
+ * <hbase.zookeeper.quorum>:<
+ * hbase.zookeeper.client.port>:<zookeeper.znode.parent>
+ * such as server,server2,server3:2181:/hbase.
+ * @param serverClass redefined hbase.regionserver.class
+ * @param serverImpl redefined hbase.regionserver.impl
+ * @throws IOException When determining the region count fails.
+ * @deprecated Since 2.5.9, 2.6.1 and 2.7.0, will be removed in 4.0.0. The {@code serverClass} and
+ * {@code serverImpl} parameters no longer take effect; use
+ * {@link #initTableReducerJob(String, Class, Job, Class, String)} instead.
+ * @see #initTableReducerJob(String, Class, Job, Class, String)
+ */
+ @Deprecated
+ public static void initTableReducerJob(String table, Class<? extends TableReducer> reducer,
+ Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl)
+ throws IOException {
+ initTableReducerJob(table, reducer, job, partitioner, quorumAddress);
+ }
+
+ /**
+ * Use this before submitting a TableReduce job. It will appropriately set up the JobConf.
+ * @param table The output table.
+ * @param reducer The reducer class to use.
+ * @param job The current job to adjust. Make sure the passed job is carrying all
+ * necessary HBase configuration.
+ * @param partitioner Partitioner to use. Pass null to use default partitioner.
+ * @param quorumAddress Distant cluster to write to; default is null for output to the cluster
+ * that is designated in hbase-site.xml. Set this String to
+ * the zookeeper ensemble of an alternate remote cluster when you would
+ * have the reduce write a cluster that is other than the default; e.g.
+ * copying tables between clusters, the source would be designated by
+ * hbase-site.xml and this param would have the ensemble
+ * address of the remote cluster. The format to pass is particular. Pass
+ * <hbase.zookeeper.quorum>:<
+ * hbase.zookeeper.client.port>:<zookeeper.znode.parent>
+ * such as server,server2,server3:2181:/hbase.
+ * @param serverClass redefined hbase.regionserver.class
+ * @param serverImpl redefined hbase.regionserver.impl
+ * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
+ * the distributed cache (tmpjars).
+ * @throws IOException When determining the region count fails.
+ * @deprecated Since 2.5.9, 2.6.1 and 2.7.0, will be removed in 4.0.0. The {@code serverClass} and
+ * {@code serverImpl} parameters no longer take effect; use
+ * {@link #initTableReducerJob(String, Class, Job, Class, String, boolean)} instead.
+ * @see #initTableReducerJob(String, Class, Job, Class, String, boolean)
+ */
+ @Deprecated
+ public static void initTableReducerJob(String table, Class<? extends TableReducer> reducer,
+ Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl,
+ boolean addDependencyJars) throws IOException {
+ initTableReducerJob(table, reducer, job, partitioner, quorumAddress, addDependencyJars);
+ }
+
/**
* Ensures that the given number of reduce tasks for the given job configuration does not exceed
* the number of regions for the given table.
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 17c6c0e45511..a8ec67c9b237 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -66,16 +66,26 @@ public class TableOutputFormat<KEY> extends OutputFormat<KEY, Mutation> implemen
* Optional job parameter to specify a peer cluster. Used specifying remote cluster when copying
* between hbase clusters (the source is picked up from hbase-site.xml).
* @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job,
- * Class, String, String, String)
+ * Class, String)
*/
public static final String QUORUM_ADDRESS = OUTPUT_CONF_PREFIX + "quorum";
/** Optional job parameter to specify peer cluster's ZK client port */
public static final String QUORUM_PORT = OUTPUT_CONF_PREFIX + "quorum.port";
- /** Optional specification of the rs class name of the peer cluster */
+ /**
+ * Optional specification of the rs class name of the peer cluster.
+ * @deprecated Since 2.5.9, 2.6.1 and 2.7.0, will be removed in 4.0.0. Has had no effect for a
+ * long time; see HBASE-6044.
+ */
+ @Deprecated
public static final String REGION_SERVER_CLASS = OUTPUT_CONF_PREFIX + "rs.class";
- /** Optional specification of the rs impl name of the peer cluster */
+ /**
+ * Optional specification of the rs impl name of the peer cluster.
+ * @deprecated Since 2.5.9, 2.6.1 and 2.7.0, will be removed in 4.0.0. Has had no effect for a
+ * long time; see HBASE-6044.
+ */
+ @Deprecated
public static final String REGION_SERVER_IMPL = OUTPUT_CONF_PREFIX + "rs.impl";
/** The configuration. */
@@ -208,15 +218,9 @@ public void setConf(Configuration otherConf) {
String address = otherConf.get(QUORUM_ADDRESS);
int zkClientPort = otherConf.getInt(QUORUM_PORT, 0);
- String serverClass = otherConf.get(REGION_SERVER_CLASS);
- String serverImpl = otherConf.get(REGION_SERVER_IMPL);
try {
this.conf = HBaseConfiguration.createClusterConf(otherConf, address, OUTPUT_CONF_PREFIX);
-
- if (serverClass != null) {
- this.conf.set(HConstants.REGION_SERVER_IMPL, serverImpl);
- }
if (zkClientPort != 0) {
this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
}
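
For reference, a minimal caller-side sketch of the migration this patch implies. The table name and ZooKeeper quorum below are hypothetical placeholders, not values taken from the patch; the point is that the deprecated seven-argument overload now just forwards to the five-argument one, since serverClass/serverImpl are ignored.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ReducerJobSetupExample {
  public static Job createJob(Configuration base) throws Exception {
    Job job = Job.getInstance(HBaseConfiguration.create(base), "example-table-reduce");

    // Old call (now deprecated): the trailing serverClass/serverImpl arguments
    // were already ignored by TableOutputFormat.
    // TableMapReduceUtil.initTableReducerJob("target_table", null, job, null,
    //     "zk1,zk2,zk3:2181:/hbase", null, null);

    // Equivalent call against the five-argument overload added by this patch.
    TableMapReduceUtil.initTableReducerJob("target_table", null, job, null,
        "zk1,zk2,zk3:2181:/hbase");
    return job;
  }
}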