*
* @return Connection object for conf
*/
public static Connection createConnection() throws IOException {
- Configuration conf = HBaseConfiguration.create();
- return createConnection(conf, null, AuthUtil.loginClient(conf));
+ return createConnection(HBaseConfiguration.create());
+ }
+
+ /**
+ * Create a new Connection instance using default HBaseConfiguration. Connection encapsulates all
+ * housekeeping for a connection to the cluster. All tables and interfaces created from returned
+ * connection share zookeeper connection, meta cache, and connections to region servers and
+ * masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection
+ * instance. Typical usage:
+ *
+ *
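+ * A minimal usage sketch (the connection uri and table name below are illustrative):
+ *
+ * <pre>{@code
+ * Connection connection =
+ *   ConnectionFactory.createConnection(URI.create("hbase+rpc://server1:16020,server2:16020"));
+ * Table table = connection.getTable(TableName.valueOf("table1"));
+ * try {
+ *   // use the table as needed, for a single operation and a single thread
+ * } finally {
+ *   table.close();
+ *   connection.close();
+ * }
+ * }</pre>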
+ *
+ * @param connectionUri the connection uri for the hbase cluster
+ * @return Connection object for conf
+ */
+ public static Connection createConnection(URI connectionUri) throws IOException {
+ return createConnection(connectionUri, HBaseConfiguration.create());
}
/**
* Create a new Connection instance using the passed conf instance. Connection
* encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
- * created from returned connection share zookeeper connection, meta cache, and connections to
- * region servers and masters.
+ * created from returned connection share zookeeper connection (if used), meta cache, and
+ * connections to region servers and masters.
* The caller is responsible for calling {@link Connection#close()} on the returned connection
* instance. Typical usage:
*
*
*
@@ -137,20 +152,41 @@ public static Connection createConnection(Configuration conf) throws IOException
/**
* Create a new Connection instance using the passed conf instance. Connection
* encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
- * created from returned connection share zookeeper connection, meta cache, and connections to
- * region servers and masters.
+ * created from returned connection share zookeeper connection (if used), meta cache, and
+ * connections to region servers and masters.
* The caller is responsible for calling {@link Connection#close()} on the returned connection
* instance. Typical usage:
*
*
+ *
+ * @param connectionUri the connection uri for the hbase cluster
+ * @param conf configuration
+ * @return Connection object for conf
+ */
+ public static Connection createConnection(URI connectionUri, Configuration conf)
+ throws IOException {
+ return createConnection(connectionUri, conf, null, AuthUtil.loginClient(conf));
+ }
+
+ /**
+ * Create a new Connection instance using the passed conf instance. Connection
+ * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
+ * created from returned connection share zookeeper connection (if used), meta cache, and
+ * connections to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection
+ * instance. Typical usage:
+ *
+ *
*
@@ -166,20 +202,42 @@ public static Connection createConnection(Configuration conf, ExecutorService po
/**
* Create a new Connection instance using the passed conf instance. Connection
* encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
- * created from returned connection share zookeeper connection, meta cache, and connections to
- * region servers and masters.
+ * created from returned connection share zookeeper connection (if used), meta cache, and
+ * connections to region servers and masters.
* The caller is responsible for calling {@link Connection#close()} on the returned connection
* instance. Typical usage:
*
*
+ *
+ * @param connectionUri the connection uri for the hbase cluster
+ * @param conf configuration
+ * @param pool the thread pool to use for batch operations
+ * @return Connection object for conf
+ */
+ public static Connection createConnection(URI connectionUri, Configuration conf,
+ ExecutorService pool) throws IOException {
+ return createConnection(connectionUri, conf, pool, AuthUtil.loginClient(conf));
+ }
+
+ /**
+ * Create a new Connection instance using the passed conf instance. Connection
+ * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
+ * created from returned connection share zookeeper connection (if used), meta cache, and
+ * connections to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection
+ * instance. Typical usage:
+ *
+ *
*
@@ -194,20 +252,42 @@ public static Connection createConnection(Configuration conf, User user) throws
/**
* Create a new Connection instance using the passed conf instance. Connection
* encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
- * created from returned connection share zookeeper connection, meta cache, and connections to
- * region servers and masters.
+ * created from returned connection share zookeeper connection (if used), meta cache, and
+ * connections to region servers and masters.
* The caller is responsible for calling {@link Connection#close()} on the returned connection
* instance. Typical usage:
*
*
+ *
+ * @param connectionUri the connection uri for the hbase cluster
+ * @param conf configuration
+ * @param user the user the connection is for
+ * @return Connection object for conf
+ */
+ public static Connection createConnection(URI connectionUri, Configuration conf, User user)
+ throws IOException {
+ return createConnection(connectionUri, conf, null, user);
+ }
+
+ /**
+ * Create a new Connection instance using the passed conf instance. Connection
+ * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
+ * created from returned connection share zookeeper connection (if used), meta cache, and
+ * connections to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection
+ * instance. Typical usage:
+ *
+ *
*
@@ -224,20 +304,43 @@ public static Connection createConnection(Configuration conf, ExecutorService po
/**
* Create a new Connection instance using the passed conf instance. Connection
* encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
- * created from returned connection share zookeeper connection, meta cache, and connections to
- * region servers and masters.
+ * created from returned connection share zookeeper connection (if used), meta cache, and
+ * connections to region servers and masters.
* The caller is responsible for calling {@link Connection#close()} on the returned connection
* instance. Typical usage:
*
*
+ *
+ * @param connectionUri the connection uri for the hbase cluster
+ * @param conf configuration
+ * @param user the user the connection is for
+ * @param pool the thread pool to use for batch operations
+ * @return Connection object for conf
+ */
+ public static Connection createConnection(URI connectionUri, Configuration conf,
+ ExecutorService pool, User user) throws IOException {
+ return createConnection(connectionUri, conf, pool, user, Collections.emptyMap());
+ }
+
+ /**
+ * Create a new Connection instance using the passed conf instance. Connection
+ * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
+ * created from returned connection share zookeeper connection (if used), meta cache, and
+ * connections to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection
+ * instance. Typical usage:
+ *
+ *
*
@@ -249,6 +352,37 @@ public static Connection createConnection(Configuration conf, ExecutorService po
*/
public static Connection createConnection(Configuration conf, ExecutorService pool,
final User user, Map<String, byte[]> connectionAttributes) throws IOException {
+ return createConnection(null, conf, pool, user, connectionAttributes);
+ }
+
+ /**
+ * Create a new Connection instance using the passed conf instance. Connection
+ * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
+ * created from returned connection share zookeeper connection (if used), meta cache, and
+ * connections to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection
+ * instance. Typical usage:
+ *
+ *
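+ * A minimal sketch passing connection attributes (assumes {@code conf}, {@code pool} and
+ * {@code user} are already set up; the uri and attribute values are illustrative):
+ *
+ * <pre>{@code
+ * Map<String, byte[]> attrs =
+ *   Collections.singletonMap("clientId", Bytes.toBytes("my-app"));
+ * Connection connection = ConnectionFactory.createConnection(
+ *   URI.create("hbase+zk://zk1:2181,zk2:2181/hbase"), conf, pool, user, attrs);
+ * }</pre>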
+ *
+ * @param connectionUri the connection uri for the hbase cluster
+ * @param conf configuration
+ * @param user the user the connection is for
+ * @param pool the thread pool to use for batch operations
+ * @param connectionAttributes attributes to be sent along to server during connection establishment
+ * @return Connection object for conf
+ */
+ public static Connection createConnection(URI connectionUri, Configuration conf,
+ ExecutorService pool, final User user, Map<String, byte[]> connectionAttributes)
+ throws IOException {
Class<?> clazz = conf.getClass(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
ConnectionOverAsyncConnection.class, Connection.class);
if (clazz != ConnectionOverAsyncConnection.class) {
@@ -263,7 +397,7 @@ public static Connection createConnection(Configuration conf, ExecutorService po
throw new IOException(e);
}
} else {
- return FutureUtils.get(createAsyncConnection(conf, user, connectionAttributes))
+ return FutureUtils.get(createAsyncConnection(connectionUri, conf, user, connectionAttributes))
.toConnection();
}
}
@@ -277,6 +411,16 @@ public static CompletableFuture createAsyncConnection() {
return createAsyncConnection(HBaseConfiguration.create());
}
+ /**
+ * Call {@link #createAsyncConnection(URI, Configuration)} using default HBaseConfiguration.
+ * @param connectionUri the connection uri for the hbase cluster
+ * @see #createAsyncConnection(URI, Configuration)
+ * @return AsyncConnection object wrapped by CompletableFuture
+ */
+ public static CompletableFuture<AsyncConnection> createAsyncConnection(URI connectionUri) {
+ return createAsyncConnection(connectionUri, HBaseConfiguration.create());
+ }
+
/**
* Call {@link #createAsyncConnection(Configuration, User)} using the given {@code conf} and a
* User object created by {@link UserProvider}. The given {@code conf} will also be used to
@@ -287,6 +431,21 @@ public static CompletableFuture createAsyncConnection() {
* @see UserProvider
*/
public static CompletableFuture<AsyncConnection> createAsyncConnection(Configuration conf) {
+ return createAsyncConnection(null, conf);
+ }
+
+ /**
+ * Call {@link #createAsyncConnection(URI, Configuration, User)} using the given {@code connectionUri},
+ * {@code conf} and a User object created by {@link UserProvider}. The given {@code conf} will
+ * also be used to initialize the {@link UserProvider}.
+ * @param connectionUri the connection uri for the hbase cluster
+ * @param conf configuration
+ * @return AsyncConnection object wrapped by CompletableFuture
+ * @see #createAsyncConnection(URI, Configuration, User)
+ * @see UserProvider
+ */
+ public static CompletableFuture<AsyncConnection> createAsyncConnection(URI connectionUri,
+ Configuration conf) {
User user;
try {
user = AuthUtil.loginClient(conf);
@@ -295,7 +454,7 @@ public static CompletableFuture createAsyncConnection(Configura
future.completeExceptionally(e);
return future;
}
- return createAsyncConnection(conf, user);
+ return createAsyncConnection(connectionUri, conf, user);
}
/**
@@ -315,7 +474,28 @@ public static CompletableFuture createAsyncConnection(Configura
*/
public static CompletableFuture<AsyncConnection> createAsyncConnection(Configuration conf,
final User user) {
- return createAsyncConnection(conf, user, null);
+ return createAsyncConnection(null, conf, user);
+ }
+
+ /**
+ * Create a new AsyncConnection instance using the passed {@code connectionUri}, {@code conf} and
+ * {@code user}. AsyncConnection encapsulates all housekeeping for a connection to the cluster.
+ * All tables and interfaces created from returned connection share zookeeper connection (if used),
+ * meta cache, and connections to region servers and masters.
+ *
+ * The caller is responsible for calling {@link AsyncConnection#close()} on the returned
+ * connection instance.
+ *
+ * Usually you should only create one AsyncConnection instance in your code and use it everywhere
+ * as it is thread safe.
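+ *
+ * A minimal usage sketch (the connection uri below is illustrative):
+ *
+ * <pre>{@code
+ * AsyncConnection conn = ConnectionFactory
+ *   .createAsyncConnection(URI.create("hbase+rpc://server1:16020"), conf, user).join();
+ * }</pre>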
+ * @param connectionUri the connection uri for the hbase cluster
+ * @param conf configuration
+ * @param user the user the asynchronous connection is for
+ * @return AsyncConnection object wrapped by CompletableFuture
+ */
+ public static CompletableFuture<AsyncConnection> createAsyncConnection(URI connectionUri,
+ Configuration conf, final User user) {
+ return createAsyncConnection(connectionUri, conf, user, null);
}
/**
@@ -336,9 +516,38 @@ public static CompletableFuture createAsyncConnection(Configura
*/
public static CompletableFuture<AsyncConnection> createAsyncConnection(Configuration conf,
final User user, Map<String, byte[]> connectionAttributes) {
+ return createAsyncConnection(null, conf, user, connectionAttributes);
+ }
+
+ /**
+ * Create a new AsyncConnection instance using the passed {@code connectionUri}, {@code conf} and
+ * {@code user}. AsyncConnection encapsulates all housekeeping for a connection to the cluster.
+ * All tables and interfaces created from returned connection share zookeeper connection (if used),
+ * meta cache, and connections to region servers and masters.
+ *
+ * The caller is responsible for calling {@link AsyncConnection#close()} on the returned
+ * connection instance.
+ *
+ * Usually you should only create one AsyncConnection instance in your code and use it everywhere
+ * as it is thread safe.
+ * @param connectionUri the connection uri for the hbase cluster
+ * @param conf configuration
+ * @param user the user the asynchronous connection is for
+ * @param connectionAttributes attributes to be sent along to server during connection establishment
+ * @return AsyncConnection object wrapped by CompletableFuture
+ */
+ public static CompletableFuture<AsyncConnection> createAsyncConnection(URI connectionUri,
+ Configuration conf, final User user, Map<String, byte[]> connectionAttributes) {
return TraceUtil.tracedFuture(() -> {
+ ConnectionRegistry registry;
+ try {
+ registry = connectionUri != null
+ ? ConnectionRegistryFactory.create(connectionUri, conf, user)
+ : ConnectionRegistryFactory.create(conf, user);
+ } catch (Exception e) {
+ return FutureUtils.failedFuture(e);
+ }
CompletableFuture<AsyncConnection> future = new CompletableFuture<>();
- ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf, user);
addListener(registry.getClusterId(), (clusterId, error) -> {
if (error != null) {
registry.close();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
index 415d46397b8f..5eef2c5f93e1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
@@ -17,27 +17,77 @@
*/
package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY;
-
+import java.io.IOException;
+import java.net.URI;
+import java.util.ServiceLoader;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
/**
- * Factory class to get the instance of configured connection registry.
+ * The entry point for creating a {@link ConnectionRegistry}.
*/
@InterfaceAudience.Private
final class ConnectionRegistryFactory {
+ private static final Logger LOG = LoggerFactory.getLogger(ConnectionRegistryFactory.class);
+
+ private static final ImmutableMap<String, ConnectionRegistryURIFactory> CREATORS;
+ static {
+ ImmutableMap.Builder<String, ConnectionRegistryURIFactory> builder = ImmutableMap.builder();
+ for (ConnectionRegistryURIFactory factory : ServiceLoader
+ .load(ConnectionRegistryURIFactory.class)) {
+ builder.put(factory.getScheme().toLowerCase(), factory);
+ }
+ // throw IllegalArgumentException if there are duplicated keys
+ CREATORS = builder.buildOrThrow();
+ }
+
private ConnectionRegistryFactory() {
}
- /** Returns The connection registry implementation to use. */
- static ConnectionRegistry getRegistry(Configuration conf, User user) {
+ /**
+ * Returns the connection registry implementation to use, for the given connection url
+ * {@code uri}.
+ *
+ * We use {@link ServiceLoader} to load different implementations, and use the scheme of the given
+ * {@code uri} to select the implementation. If there is no scheme specified, or we cannot find a
+ * {@link ConnectionRegistryURIFactory} implementation for the given scheme, we will fall back to
+ * use the old way to create the {@link ConnectionRegistry}. Notice that, if fallback happens, the
+ * specified connection url {@code uri} will not take effect, we will load all the related
+ * configurations from the given Configuration instance {@code conf}
+ */
+ static ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException {
+ if (StringUtils.isBlank(uri.getScheme())) {
+ LOG.warn("No scheme specified for {}, fallback to use old way", uri);
+ return create(conf, user);
+ }
+ ConnectionRegistryURIFactory creator = CREATORS.get(uri.getScheme().toLowerCase());
+ if (creator == null) {
+ LOG.warn("No creator registered for {}, fallback to use old way", uri);
+ return create(conf, user);
+ }
+ return creator.create(uri, conf, user);
+ }
+
+ /**
+ * Returns the connection registry implementation to use.
+ *
+ * This is used when we do not have a connection url: we will use the old way to load the
+ * connection registry, by checking the
+ * {@literal HConstants#CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY} configuration.
+ */
+ static ConnectionRegistry create(Configuration conf, User user) {
Class<? extends ConnectionRegistry> clazz =
- conf.getClass(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, RpcConnectionRegistry.class,
- ConnectionRegistry.class);
+ conf.getClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY,
+ RpcConnectionRegistry.class, ConnectionRegistry.class);
return ReflectionUtils.newInstance(clazz, conf, user);
}
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java
new file mode 100644
index 000000000000..ab2037a1c138
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.net.URI;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * For creating different {@link ConnectionRegistry} implementations.
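+ * <p>
+ * Implementations are discovered through {@link java.util.ServiceLoader}, so each implementation
+ * must be registered under
+ * {@code META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory}.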
+ */
+@InterfaceAudience.Private
+public interface ConnectionRegistryURIFactory {
+
+ /**
+ * Instantiate the {@link ConnectionRegistry} using the given parameters.
+ */
+ ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException;
+
+ /**
+ * The scheme for this implementation. Used to register this URI factory to the
+ * {@link ConnectionRegistryFactory}.
+ */
+ String getScheme();
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java
new file mode 100644
index 000000000000..cb2338b1429d
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.net.URI;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Connection registry creator implementation for creating {@link RpcConnectionRegistry}.
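+ * The connection uri is expected to look like {@code hbase+rpc://server1:16020,server2:16020}
+ * (hosts and ports here are illustrative); the authority part is used as the bootstrap node list.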
+ */
+@InterfaceAudience.Private
+public class RpcConnectionRegistryCreator implements ConnectionRegistryURIFactory {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RpcConnectionRegistryCreator.class);
+
+ @Override
+ public ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException {
+ assert getScheme().equals(uri.getScheme());
+ LOG.debug("connect to hbase cluster with rpc bootstrap servers='{}'", uri.getAuthority());
+ Configuration c = new Configuration(conf);
+ c.set(RpcConnectionRegistry.BOOTSTRAP_NODES, uri.getAuthority());
+ return new RpcConnectionRegistry(c, user);
+ }
+
+ @Override
+ public String getScheme() {
+ return "hbase+rpc";
+ }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java
new file mode 100644
index 000000000000..8aa51e04fe4d
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.net.URI;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Connection registry creator implementation for creating {@link ZKConnectionRegistry}.
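+ * The connection uri is expected to look like {@code hbase+zk://zk1:2181,zk2:2181/hbase}
+ * (hosts, ports and path here are illustrative); the authority becomes the zookeeper quorum and
+ * the path becomes the znode parent.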
+ */
+@InterfaceAudience.Private
+public class ZKConnectionRegistryCreator implements ConnectionRegistryURIFactory {
+
+ private static final Logger LOG = LoggerFactory.getLogger(ZKConnectionRegistryCreator.class);
+
+ @Override
+ public ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException {
+ assert getScheme().equals(uri.getScheme());
+ LOG.debug("connect to hbase cluster with zk quorum='{}' and parent='{}'", uri.getAuthority(),
+ uri.getPath());
+ Configuration c = new Configuration(conf);
+ c.set(HConstants.CLIENT_ZOOKEEPER_QUORUM, uri.getAuthority());
+ c.set(HConstants.ZOOKEEPER_ZNODE_PARENT, uri.getPath());
+ return new ZKConnectionRegistry(c, user);
+ }
+
+ @Override
+ public String getScheme() {
+ return "hbase+zk";
+ }
+}
diff --git a/hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory b/hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory
new file mode 100644
index 000000000000..b25a569776f1
--- /dev/null
+++ b/hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+org.apache.hadoop.hbase.client.RpcConnectionRegistryCreator
+org.apache.hadoop.hbase.client.ZKConnectionRegistryCreator
\ No newline at end of file
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryCreatorUriParsing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryCreatorUriParsing.java
new file mode 100644
index 000000000000..4dabd894b5b4
--- /dev/null
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryCreatorUriParsing.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockConstruction;
+import static org.mockito.Mockito.mockStatic;
+
+import java.net.URI;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.ArgumentCaptor;
+import org.mockito.MockedConstruction;
+import org.mockito.MockedStatic;
+
+/**
+ * Make sure we can successfully parse the URI component
+ */
+@Category({ ClientTests.class, SmallTests.class })
+public class TestConnectionRegistryCreatorUriParsing {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestConnectionRegistryCreatorUriParsing.class);
+
+ private Configuration conf;
+
+ private User user;
+
+ private MockedConstruction<RpcConnectionRegistry> mockedRpcRegistry;
+
+ private MockedConstruction<ZKConnectionRegistry> mockedZkRegistry;
+
+ private MockedStatic<ReflectionUtils> mockedReflectionUtils;
+
+ private List<?> args;
+
+ @Before
+ public void setUp() {
+ conf = HBaseConfiguration.create();
+ user = mock(User.class);
+ args = null;
+ mockedRpcRegistry = mockConstruction(RpcConnectionRegistry.class, (mock, context) -> {
+ args = context.arguments();
+ });
+ mockedZkRegistry = mockConstruction(ZKConnectionRegistry.class, (mock, context) -> {
+ args = context.arguments();
+ });
+ mockedReflectionUtils = mockStatic(ReflectionUtils.class);
+ }
+
+ @After
+ public void tearDown() {
+ mockedRpcRegistry.closeOnDemand();
+ mockedZkRegistry.closeOnDemand();
+ mockedReflectionUtils.closeOnDemand();
+ }
+
+ @Test
+ public void testParseRpcSingle() throws Exception {
+ ConnectionRegistryFactory.create(new URI("hbase+rpc://server1:123"), conf, user);
+ assertEquals(1, mockedRpcRegistry.constructed().size());
+ assertSame(user, args.get(1));
+ Configuration conf = (Configuration) args.get(0);
+ assertEquals("server1:123", conf.get(RpcConnectionRegistry.BOOTSTRAP_NODES));
+ }
+
+ @Test
+ public void testParseRpcMultiple() throws Exception {
+ ConnectionRegistryFactory.create(new URI("hbase+rpc://server1:123,server2:456,server3:789"),
+ conf, user);
+ assertEquals(1, mockedRpcRegistry.constructed().size());
+ assertSame(user, args.get(1));
+ Configuration conf = (Configuration) args.get(0);
+ assertEquals("server1:123,server2:456,server3:789",
+ conf.get(RpcConnectionRegistry.BOOTSTRAP_NODES));
+ }
+
+ @Test
+ public void testParseZkSingle() throws Exception {
+ ConnectionRegistryFactory.create(new URI("hbase+zk://server1:123/root"), conf, user);
+ assertEquals(1, mockedZkRegistry.constructed().size());
+ assertSame(user, args.get(1));
+ Configuration conf = (Configuration) args.get(0);
+ assertEquals("server1:123", conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM));
+ assertEquals("/root", conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
+ }
+
+ @Test
+ public void testParseZkMultiple() throws Exception {
+ ConnectionRegistryFactory
+ .create(new URI("hbase+zk://server1:123,server2:456,server3:789/root/path"), conf, user);
+ assertEquals(1, mockedZkRegistry.constructed().size());
+ assertSame(user, args.get(1));
+ Configuration conf = (Configuration) args.get(0);
+ assertEquals("server1:123,server2:456,server3:789",
+ conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM));
+ assertEquals("/root/path", conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
+ }
+
+ @Test
+ public void testFallbackNoScheme() throws Exception {
+ conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, ZKConnectionRegistry.class,
+ ConnectionRegistry.class);
+ ConnectionRegistryFactory.create(new URI("server1:2181/path"), conf, user);
+ ArgumentCaptor<Class<?>> clazzCaptor = ArgumentCaptor.forClass(Class.class);
+ ArgumentCaptor