From 28f36f4619aab268687577f5b9a48309ce483902 Mon Sep 17 00:00:00 2001 From: Bharath Vissapragada Date: Tue, 29 Jun 2021 19:32:13 -0700 Subject: [PATCH] HBASE-26021: Undo the incompatible serialization change in HBASE-7767 (#3435) Revert "HBASE-7767 Get rid of ZKTable, and table enable/disable state in ZK (Andrey Stepachev)" Fixes the ConnectionRegistry and dependencies due to the revert. Signed-off-by: Viraj Jasani Signed-off-by: Andrew Purtell --- .../hbase/client/ConnectionAdapter.java | 5 - .../hbase/client/ConnectionManager.java | 26 +- .../hbase/client/ConnectionRegistry.java | 6 + .../hadoop/hbase/client/HConnection.java | 7 - .../hadoop/hbase/client/MasterRegistry.java | 20 +- .../hadoop/hbase/client/TableState.java | 205 - .../hbase/client/ZKConnectionRegistry.java | 21 + .../hbase/protobuf/RequestConverter.java | 16 +- .../ZKTableStateClientSideReader.java | 204 + .../hbase/zookeeper/ZooKeeperWatcher.java | 1 - .../hadoop/hbase/client/TestAsyncProcess.java | 5 + .../hbase/client/TestClientNoCluster.java | 6 + .../TestZKTableStateClientSideReader.java | 52 + .../hbase/protobuf/generated/HBaseProtos.java | 1771 +- .../protobuf/generated/MasterProtos.java | 19400 ++++++++-------- .../protobuf/generated/ZooKeeperProtos.java | 213 +- hbase-protocol/src/main/protobuf/HBase.proto | 21 - hbase-protocol/src/main/protobuf/Master.proto | 27 +- .../src/main/protobuf/ZooKeeper.proto | 3 +- .../hbase/rsgroup/RSGroupAdminServer.java | 6 +- .../hbase/rsgroup/RSGroupInfoManagerImpl.java | 8 +- .../hadoop/hbase/CoordinatedStateManager.java | 8 + .../apache/hadoop/hbase/TableDescriptor.java | 158 - .../apache/hadoop/hbase/TableDescriptors.java | 25 - .../hadoop/hbase/TableStateManager.java | 121 + .../BaseCoordinatedStateManager.java | 5 + .../ZkCoordinatedStateManager.java | 14 + .../ZkOpenRegionCoordination.java | 4 +- .../hbase/master/AssignmentManager.java | 114 +- .../apache/hadoop/hbase/master/HMaster.java | 36 +- .../hadoop/hbase/master/MasterFileSystem.java | 1 + .../hbase/master/MasterRpcServices.java | 40 +- .../hadoop/hbase/master/MasterServices.java | 5 - .../hadoop/hbase/master/RegionStates.java | 13 +- .../hbase/master/TableNamespaceManager.java | 9 +- .../hbase/master/TableStateManager.java | 219 - .../master/handler/ClosedRegionHandler.java | 5 +- .../master/handler/CreateTableHandler.java | 84 +- .../master/handler/DisableTableHandler.java | 30 +- .../master/handler/EnableTableHandler.java | 45 +- .../master/handler/TableEventHandler.java | 13 +- .../procedure/AddColumnFamilyProcedure.java | 4 +- .../procedure/CreateTableProcedure.java | 15 +- .../DeleteColumnFamilyProcedure.java | 4 +- .../procedure/DisableTableProcedure.java | 14 +- .../procedure/EnableTableProcedure.java | 12 +- .../procedure/MasterDDLOperationHelper.java | 4 +- .../ModifyColumnFamilyProcedure.java | 4 +- .../procedure/ModifyTableProcedure.java | 6 +- .../procedure/ServerCrashProcedure.java | 8 +- .../master/snapshot/SnapshotManager.java | 8 +- .../hbase/migration/NamespaceUpgrade.java | 4 +- .../hbase/regionserver/CompactionTool.java | 14 +- .../hbase/regionserver/wal/WALCellCodec.java | 1 - .../hbase/snapshot/SnapshotManifest.java | 8 +- .../hadoop/hbase/util/FSTableDescriptors.java | 213 +- .../apache/hadoop/hbase/util/HBaseFsck.java | 60 +- .../org/apache/hadoop/hbase/util/HMerge.java | 3 +- .../org/apache/hadoop/hbase/util/Merge.java | 5 +- .../hadoop/hbase/util/ZKDataMigrator.java | 90 +- .../apache/hadoop/hbase/wal/WALSplitter.java | 29 +- .../hbase/zookeeper/ZKTableStateManager.java | 369 + 
.../hadoop/hbase/HBaseTestingUtility.java | 1 - .../hadoop/hbase/TestDrainingServer.java | 246 +- .../TestFSTableDescriptorForceCreation.java | 12 +- .../TestHColumnDescriptorDefaultVersions.java | 4 +- .../hadoop/hbase/TestTableDescriptor.java | 57 - .../hadoop/hbase/client/TestAdmin1.java | 26 +- .../hbase/client/TestMasterRegistry.java | 3 + .../hbase/master/MockNoopMasterServices.java | 5 - .../TestAssignmentManagerOnCluster.java | 16 +- .../hbase/master/TestCatalogJanitor.java | 35 +- .../hadoop/hbase/master/TestMaster.java | 4 +- .../hbase/master/TestMasterFailover.java | 19 +- .../TestMasterRestartAfterDisablingTable.java | 8 +- .../hbase/master/TestOpenedRegionHandler.java | 11 +- .../hadoop/hbase/master/TestRegionStates.java | 2 + .../hbase/master/TestTableLockManager.java | 6 +- .../MasterProcedureTestingUtility.java | 8 +- .../procedure/TestCreateTableProcedure2.java | 10 +- ...TableDescriptorModificationFromClient.java | 6 +- .../hbase/snapshot/SnapshotTestingUtils.java | 8 +- .../hbase/util/TestFSTableDescriptors.java | 39 +- .../hadoop/hbase/util/TestHBaseFsck.java | 51 + .../zookeeper/TestZKTableStateManager.java | 114 + 85 files changed, 11522 insertions(+), 13016 deletions(-) delete mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java index ac4a34281e8f..fe3af35b009b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java @@ -184,11 +184,6 @@ public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) return wrappedConnection.isTableAvailable(tableName, splitKeys); } - @Override - public TableState getTableState(TableName tableName) throws IOException { - return wrappedConnection.getTableState(tableName); - } - @Override public HTableDescriptor[] listTables() throws IOException { return wrappedConnection.listTables(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index 879e8e3cbcd7..dcd9d5319f08 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -126,8 +126,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest; import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; @@ -1003,7 +1001,7 @@ public HRegionLocation getRegionLocation(final byte[] tableName, @Override public boolean isTableEnabled(TableName tableName) throws IOException { - return getTableState(tableName).inStates(TableState.State.ENABLED); + return this.registry.isTableOnlineState(tableName, true); } @Override @@ -1013,7 +1011,7 @@ public boolean isTableEnabled(byte[] tableName) throws IOException { @Override public boolean isTableDisabled(TableName tableName) throws IOException { - return getTableState(tableName).inStates(TableState.State.DISABLED); + return this.registry.isTableOnlineState(tableName, false); } @Override @@ -2138,13 +2136,6 @@ public ListTableNamesByNamespaceResponse listTableNamesByNamespace( return stub.listTableNamesByNamespace(controller, request); } - @Override - public GetTableStateResponse getTableState( - RpcController controller, GetTableStateRequest request) - throws ServiceException { - return stub.getTableState(controller, request); - } - @Override public void close() { release(this.mss); @@ -2774,19 +2765,6 @@ public RpcRetryingCallerFactory getRpcRetryingCallerFactory() { public RpcControllerFactory getRpcControllerFactory() { return this.rpcControllerFactory; } - - public TableState getTableState(TableName tableName) throws IOException { - MasterKeepAliveConnection master = getKeepAliveMasterService(); - try { - GetTableStateResponse resp = master.getTableState(null, - RequestConverter.buildGetTableStateRequest(tableName)); - return TableState.convert(resp.getTableState()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - } } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java index 353ff6182808..67a7087e60b5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.TableName; /** * Implementations hold cluster information such as this cluster's id, location of hbase:meta, etc. 
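A minimal caller sketch for the contract this hunk restores. Hedged: TableStateCheck is a made-up name; isTableOnlineState and the delegating call sites in the ConnectionManager hunk above are from this patch. The sketch sits in org.apache.hadoop.hbase.client because ConnectionRegistry is package-private:

    package org.apache.hadoop.hbase.client;

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;

    final class TableStateCheck {
      // Mirrors ConnectionManager.isTableEnabled()/isTableDisabled() above:
      // a single registry call answers "is the table in the requested state?"
      static boolean isEnabled(ConnectionRegistry registry, TableName table) throws IOException {
        return registry.isTableOnlineState(table, true);
      }

      static boolean isDisabled(ConnectionRegistry registry, TableName table) throws IOException {
        return registry.isTableOnlineState(table, false);
      }
    }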
@@ -51,6 +52,11 @@ interface ConnectionRegistry { */ String getClusterId() throws IOException; + /** + * @param enabled if true, check that the table is enabled; if false, check that it is disabled + */ + boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException; + /** * @return Count of 'running' regionservers * @throws IOException diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index 7de1dfb937dd..e476d5f9717f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -212,13 +212,6 @@ boolean isMasterRunning() @Deprecated boolean isTableDisabled(byte[] tableName) throws IOException; - /** - * Retrieve TableState, represent current table state. - * @param tableName table state for - * @return state of the table - */ - public TableState getTableState(TableName tableName) throws IOException; - /** * @param tableName table name * @return true if all regions of the table are available, false otherwise diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java index 877049cd395f..928c62a013fe 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; import org.apache.hadoop.hbase.exceptions.MasterRegistryFetchException; @@ -62,6 +63,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse; /** * Master based registry implementation.
Makes RPCs to the configured master addresses from config @@ -206,6 +209,22 @@ public GetNumLiveRSResponse call(ClientMetaService.Interface stub, RpcController return resp.getNumRegionServers(); } + @Override + public boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException { + final GetTableStateRequest request = GetTableStateRequest.newBuilder().setTableName( + tableName.getNameAsString()).setIsEnabled(enabled).build(); + GetTableStateResponse resp = doCall(new Callable<GetTableStateResponse>() { + @Override + public GetTableStateResponse call(ClientMetaService.Interface stub, RpcController controller) + throws IOException { + BlockingRpcCallback<GetTableStateResponse> cb = new BlockingRpcCallback<>(); + stub.getTableState(controller, request, cb); + return cb.get(); + } + }); + return resp.getEnabledOrDisabled(); + } + @Override public void close() { if (rpcClient != null) { @@ -262,5 +281,4 @@ void populateMasterStubs(Set<ServerName> masters) throws IOException { ImmutableSet<ServerName> getParsedMasterServers() { return masterAddr2Stub.keySet(); } - } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java deleted file mode 100644 index 384d4e695b17..000000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java +++ /dev/null @@ -1,205 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; - -/** - * Represents table state.
- */ -@InterfaceAudience.Private -public class TableState { - - @InterfaceAudience.Public - @InterfaceStability.Evolving - public static enum State { - ENABLED, - DISABLED, - DISABLING, - ENABLING; - - /** - * Covert from PB version of State - * - * @param state convert from - * @return POJO - */ - public static State convert(HBaseProtos.TableState.State state) { - State ret; - switch (state) { - case ENABLED: - ret = State.ENABLED; - break; - case DISABLED: - ret = State.DISABLED; - break; - case DISABLING: - ret = State.DISABLING; - break; - case ENABLING: - ret = State.ENABLING; - break; - default: - throw new IllegalStateException(state.toString()); - } - return ret; - } - - /** - * Covert to PB version of State - * - * @return PB - */ - public HBaseProtos.TableState.State convert() { - HBaseProtos.TableState.State state; - switch (this) { - case ENABLED: - state = HBaseProtos.TableState.State.ENABLED; - break; - case DISABLED: - state = HBaseProtos.TableState.State.DISABLED; - break; - case DISABLING: - state = HBaseProtos.TableState.State.DISABLING; - break; - case ENABLING: - state = HBaseProtos.TableState.State.ENABLING; - break; - default: - throw new IllegalStateException(this.toString()); - } - return state; - } - - } - - private final long timestamp; - private final TableName tableName; - private final State state; - - /** - * Create instance of TableState. - * @param state table state - */ - public TableState(TableName tableName, State state, long timestamp) { - this.tableName = tableName; - this.state = state; - this.timestamp = timestamp; - } - - /** - * Create instance of TableState with current timestamp - * - * @param tableName table for which state is created - * @param state state of the table - */ - public TableState(TableName tableName, State state) { - this(tableName, state, System.currentTimeMillis()); - } - - /** - * @return table state - */ - public State getState() { - return state; - } - - /** - * Timestamp of table state - * - * @return milliseconds - */ - public long getTimestamp() { - return timestamp; - } - - /** - * Table name for state - * - * @return milliseconds - */ - public TableName getTableName() { - return tableName; - } - - /** - * Check that table in given states - * @param state state - * @return true if satisfies - */ - public boolean inStates(State state) { - return this.state.equals(state); - } - - /** - * Check that table in given states - * @param states state list - * @return true if satisfies - */ - public boolean inStates(State... states) { - for (State s : states) { - if (s.equals(this.state)) { - return true; - } - } - return false; - } - - - /** - * Covert to PB version of TableState - * @return PB - */ - public HBaseProtos.TableState convert() { - return HBaseProtos.TableState.newBuilder() - .setState(this.state.convert()) - .setTable(ProtobufUtil.toProtoTableName(this.tableName)) - .setTimestamp(this.timestamp) - .build(); - } - - /** - * Covert from PB version of TableState - * @param tableState convert from - * @return POJO - */ - public static TableState convert(HBaseProtos.TableState tableState) { - TableState.State state = State.convert(tableState.getState()); - return new TableState(ProtobufUtil.toTableName(tableState.getTable()), - state, tableState.getTimestamp()); - } - - /** - * Static version of state checker - * @param state desired - * @param target equals to any of - * @return true if satisfies - */ - public static boolean isInStates(State state, State... 
target) { - for (State tableState : target) { - if (state.equals(tableState)) { - return true; - } - } - return false; - } -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index c656da86c635..ab171d22975f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; +import java.io.InterruptedIOException; import java.util.List; import org.apache.commons.logging.Log; @@ -27,9 +28,11 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.zookeeper.KeeperException; @@ -116,6 +119,24 @@ public String getClusterId() { return this.clusterId; } + @Override + public boolean isTableOnlineState(TableName tableName, boolean enabled) + throws IOException { + ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher(); + try { + if (enabled) { + return ZKTableStateClientSideReader.isEnabledTable(zkw, tableName); + } + return ZKTableStateClientSideReader.isDisabledTable(zkw, tableName); + } catch (KeeperException e) { + throw new IOException("Enable/Disable failed", e); + } catch (InterruptedException e) { + throw new InterruptedIOException(); + } finally { + zkw.close(); + } + } + @Override public int getCurrentNrHRS() throws IOException { try (ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index d794bc0dc162..65d13a3a62e7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -25,8 +25,6 @@ import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.util.ByteStringer; - import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -103,7 +101,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest; @@ -126,6 +123,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest; 
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; +import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; @@ -1419,18 +1417,6 @@ public static GetTableNamesRequest buildGetTableNamesRequest(final Pattern patte return builder.build(); } - /* - * Creates a protocol buffer GetTableStateRequest - * - * @param tableName table to get request for - * @return a GetTableStateRequest - */ - public static GetTableStateRequest buildGetTableStateRequest(final TableName tableName) { - return GetTableStateRequest.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tableName)) - .build(); - } - /** * Creates a protocol buffer GetTableDescriptorsRequest for a single table * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java new file mode 100644 index 000000000000..0c1a719fb683 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java @@ -0,0 +1,204 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.zookeeper.KeeperException; + +/** + * Non-instantiable class that provides helper functions to learn + * about HBase table state for code running on client side (hence, not having + * access to consensus context). + * + * Doesn't cache any table state, just goes directly to ZooKeeper. + * TODO: decouple this class from ZooKeeper. + */ +@InterfaceAudience.Private +public final class ZKTableStateClientSideReader { + + private ZKTableStateClientSideReader() {} + + /** + * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLED}. + * This method does not use cache. + * This method is for clients other than AssignmentManager + * @param zkw ZooKeeperWatcher instance to use + * @param tableName table we're checking + * @return True if table is disabled.
+ */ + public static boolean isDisabledTable(final ZooKeeperWatcher zkw, + final TableName tableName) + throws KeeperException, InterruptedException, TableNotFoundException { + ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); + return isTableState(ZooKeeperProtos.Table.State.DISABLED, state); + } + + /** + * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#ENABLED}. + * This method does not use cache. + * This method is for clients other than AssignmentManager + * @param zkw ZooKeeperWatcher instance to use + * @param tableName table we're checking + * @return True if table is enabled. + * @throws KeeperException + */ + public static boolean isEnabledTable(final ZooKeeperWatcher zkw, + final TableName tableName) + throws KeeperException, InterruptedException, TableNotFoundException { + return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED; + } + + /** + * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLING} + * or {@code ZooKeeperProtos.Table.State#DISABLED}. + * This method does not use cache. + * This method is for clients other than AssignmentManager. + * @param zkw ZooKeeperWatcher instance to use + * @param tableName table we're checking + * @return True if table is disabling or disabled. + * @throws KeeperException + */ + public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw, + final TableName tableName) + throws KeeperException, InterruptedException, TableNotFoundException { + ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); + return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) || + isTableState(ZooKeeperProtos.Table.State.DISABLED, state); + } + + /** + * Gets a list of all the tables set as disabled in zookeeper. + * @return Set of disabled tables, empty Set if none + * @throws KeeperException + */ + public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw) + throws KeeperException, InterruptedException, TableNotFoundException { + Set<TableName> disabledTables = new HashSet<TableName>(); + List<String> children = + ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); + for (String child: children) { + TableName tableName = + TableName.valueOf(child); + ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); + if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(tableName); + } + return disabledTables; + } + + /** + * Gets a list of all the tables set as disabled or disabling in zookeeper. + * @return Set of disabled or disabling tables, empty Set if none + * @throws KeeperException + */ + public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw) + throws KeeperException, InterruptedException, TableNotFoundException { + return + getTablesInStates( + zkw, + ZooKeeperProtos.Table.State.DISABLED, + ZooKeeperProtos.Table.State.DISABLING); + } + + /** + * Gets a list of all the tables set as enabling in zookeeper. + * @param zkw ZooKeeperWatcher instance to use + * @return Set of enabling tables, empty Set if none + * @throws KeeperException + * @throws InterruptedException + */ + public static Set<TableName> getEnablingTables(ZooKeeperWatcher zkw) + throws KeeperException, InterruptedException, TableNotFoundException { + return getTablesInStates(zkw, ZooKeeperProtos.Table.State.ENABLING); + } + + /** + * Gets a list of tables that are set as one of the passed-in states in zookeeper.
+ * @param zkw ZooKeeperWatcher instance to use + * @param states the list of states that a table could be in + * @return Set of tables in one of the states, empty Set if none + * @throws KeeperException + * @throws InterruptedException + */ + private static Set<TableName> getTablesInStates( + ZooKeeperWatcher zkw, + ZooKeeperProtos.Table.State... states) + throws KeeperException, InterruptedException, TableNotFoundException { + Set<TableName> tableNameSet = new HashSet<TableName>(); + List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); + TableName tableName; + ZooKeeperProtos.Table.State tableState; + for (String child: children) { + tableName = TableName.valueOf(child); + tableState = getTableState(zkw, tableName); + for (ZooKeeperProtos.Table.State state : states) { + if (tableState == state) { + tableNameSet.add(tableName); + break; + } + } + } + return tableNameSet; + } + + static boolean isTableState(final ZooKeeperProtos.Table.State expectedState, + final ZooKeeperProtos.Table.State currentState) { + return currentState != null && currentState.equals(expectedState); + } + + /** + * @param zkw ZooKeeperWatcher instance to use + * @param tableName table we're checking + * @return {@link ZooKeeperProtos.Table.State} found in znode. + * @throws KeeperException + * @throws TableNotFoundException if tableName doesn't exist + */ + static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, + final TableName tableName) + throws KeeperException, InterruptedException, TableNotFoundException { + String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); + byte [] data = ZKUtil.getData(zkw, znode); + if (data == null || data.length <= 0) { + throw new TableNotFoundException(tableName); + } + try { + ProtobufUtil.expectPBMagicPrefix(data); + ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); + int magicLen = ProtobufUtil.lengthOfPBMagic(); + ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen); + return builder.getState(); + } catch (IOException e) { + KeeperException ke = new KeeperException.DataInconsistencyException(); + ke.initCause(e); + throw ke; + } catch (DeserializationException e) { + throw ZKUtil.convert(e); + } + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index be05054ab4f0..b180fb9411d2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -138,7 +138,6 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { // znode used for region transitioning and assignment public String assignmentZNode; // znode used for table disabling/enabling - @Deprecated public String tableZNode; // znode containing the unique cluster ID public String clusterIdZNode; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index a73674a0ada9..6a2649b92e1e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -479,6 +479,11 @@ public String getClusterId() { return "testClusterId"; } + @Override + public boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException { + return false; + } +
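+  // Descriptive note on the stub above: this mock registry never consults
+  // ZooKeeper; always returning false makes the test connection report tables
+  // as neither enabled nor disabled.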
@Override public int getCurrentNrHRS() throws IOException { return 1; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index 5c85980691dd..f6df87d226c6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -134,6 +134,12 @@ public String getClusterId() { return HConstants.CLUSTER_ID_DEFAULT; } + @Override + public boolean isTableOnlineState(TableName tableName, boolean enabled) + throws IOException { + return enabled; + } + @Override public int getCurrentNrHRS() throws IOException { return 1; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java new file mode 100644 index 000000000000..e82d3b0fc34c --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java @@ -0,0 +1,52 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.zookeeper; + +import static org.junit.Assert.fail; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.testclassification.SmallTests; + +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.data.Stat; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + +@Category({SmallTests.class}) +public class TestZKTableStateClientSideReader { + + @Test + public void test() throws Exception { + ZooKeeperWatcher zkw = Mockito.mock(ZooKeeperWatcher.class); + RecoverableZooKeeper rzk = Mockito.mock(RecoverableZooKeeper.class); + Mockito.doReturn(rzk).when(zkw).getRecoverableZooKeeper(); + Mockito.doReturn(null).when(rzk).getData(Mockito.anyString(), + Mockito.any(Watcher.class), Mockito.any(Stat.class)); + TableName table = TableName.valueOf("table-not-exists"); + try { + ZKTableStateClientSideReader.getTableState(zkw, table); + fail("Shouldn't reach here"); + } catch(TableNotFoundException e) { + // Expected Table not found exception + } + } +} diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index 9f0a6eb46b94..aff15c13562f 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -1975,1576 +1975,6 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Bui // @@protoc_insertion_point(class_scope:hbase.pb.TableSchema) } - public interface TableStateOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .hbase.pb.TableState.State state = 1; - /** - * required .hbase.pb.TableState.State state = 1; - * - *
-     * This is the table's state.
-     * </pre>
- */ - boolean hasState(); - /** - * required .hbase.pb.TableState.State state = 1; - * - *
-     * This is the table's state.
-     * </pre>
- */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); - - // required .hbase.pb.TableName table = 2; - /** - * required .hbase.pb.TableName table = 2; - */ - boolean hasTable(); - /** - * required .hbase.pb.TableName table = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable(); - /** - * required .hbase.pb.TableName table = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder(); - - // optional uint64 timestamp = 3; - /** - * optional uint64 timestamp = 3; - */ - boolean hasTimestamp(); - /** - * optional uint64 timestamp = 3; - */ - long getTimestamp(); - } - /** - * Protobuf type {@code hbase.pb.TableState} - * - *
-   ** Denotes state of the table 
-   * </pre>
- */ - public static final class TableState extends - com.google.protobuf.GeneratedMessage - implements TableStateOrBuilder { - // Use TableState.newBuilder() to construct. - private TableState(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private TableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final TableState defaultInstance; - public static TableState getDefaultInstance() { - return defaultInstance; - } - - public TableState getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private TableState( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - state_ = value; - } - break; - } - case 18: { - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = table_.toBuilder(); - } - table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(table_); - table_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000002; - break; - } - case 24: { - bitField0_ |= 0x00000004; - timestamp_ = input.readUInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public TableState parsePartialFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new TableState(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - /** - * Protobuf enum {@code hbase.pb.TableState.State} - * - *
-     * Table's current state
-     * </pre>
- */ - public enum State - implements com.google.protobuf.ProtocolMessageEnum { - /** - * ENABLED = 0; - */ - ENABLED(0, 0), - /** - * DISABLED = 1; - */ - DISABLED(1, 1), - /** - * DISABLING = 2; - */ - DISABLING(2, 2), - /** - * ENABLING = 3; - */ - ENABLING(3, 3), - ; - - /** - * ENABLED = 0; - */ - public static final int ENABLED_VALUE = 0; - /** - * DISABLED = 1; - */ - public static final int DISABLED_VALUE = 1; - /** - * DISABLING = 2; - */ - public static final int DISABLING_VALUE = 2; - /** - * ENABLING = 3; - */ - public static final int ENABLING_VALUE = 3; - - - public final int getNumber() { return value; } - - public static State valueOf(int value) { - switch (value) { - case 0: return ENABLED; - case 1: return DISABLED; - case 2: return DISABLING; - case 3: return ENABLING; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public State findValueByNumber(int number) { - return State.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDescriptor().getEnumTypes().get(0); - } - - private static final State[] VALUES = values(); - - public static State valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private State(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:hbase.pb.TableState.State) - } - - private int bitField0_; - // required .hbase.pb.TableState.State state = 1; - public static final int STATE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_; - /** - * required .hbase.pb.TableState.State state = 1; - * - *
-     * This is the table's state.
-     * </pre>
- */ - public boolean hasState() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableState.State state = 1; - * - *
-     * This is the table's state.
-     * </pre>
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { - return state_; - } - - // required .hbase.pb.TableName table = 2; - public static final int TABLE_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName table_; - /** - * required .hbase.pb.TableName table = 2; - */ - public boolean hasTable() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .hbase.pb.TableName table = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable() { - return table_; - } - /** - * required .hbase.pb.TableName table = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder() { - return table_; - } - - // optional uint64 timestamp = 3; - public static final int TIMESTAMP_FIELD_NUMBER = 3; - private long timestamp_; - /** - * optional uint64 timestamp = 3; - */ - public boolean hasTimestamp() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional uint64 timestamp = 3; - */ - public long getTimestamp() { - return timestamp_; - } - - private void initFields() { - state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; - table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - timestamp_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasState()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTable()) { - memoizedIsInitialized = 0; - return false; - } - if (!getTable().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, state_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, table_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, timestamp_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, state_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, table_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, timestamp_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other = 
(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) obj; - - boolean result = true; - result = result && (hasState() == other.hasState()); - if (hasState()) { - result = result && - (getState() == other.getState()); - } - result = result && (hasTable() == other.hasTable()); - if (hasTable()) { - result = result && getTable() - .equals(other.getTable()); - } - result = result && (hasTimestamp() == other.hasTimestamp()); - if (hasTimestamp()) { - result = result && (getTimestamp() - == other.getTimestamp()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasState()) { - hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getState()); - } - if (hasTable()) { - hash = (37 * hash) + TABLE_FIELD_NUMBER; - hash = (53 * hash) + getTable().hashCode(); - } - if (hasTimestamp()) { - hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getTimestamp()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.TableState} - * - *
-     ** Denotes state of the table 
-     * </pre>
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; - bitField0_ = (bitField0_ & ~0x00000001); - if (tableBuilder_ == null) { - table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - } else { - tableBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - timestamp_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.state_ = state_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (tableBuilder_ == null) { - result.table_ = table_; - } else { - result.table_ = tableBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.timestamp_ = timestamp_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) return this; - if (other.hasState()) { - setState(other.getState()); - } - if (other.hasTable()) { - mergeTable(other.getTable()); - } - if (other.hasTimestamp()) { - setTimestamp(other.getTimestamp()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasState()) { - - return false; - } - if (!hasTable()) { - - return false; - } - if (!getTable().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required .hbase.pb.TableState.State state = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; - /** - * required .hbase.pb.TableState.State state = 1; - * - *
-       * <pre>
-       * This is the table's state.
-       * </pre>
- */ - public boolean hasState() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableState.State state = 1; - * - *
-       * <pre>
-       * This is the table's state.
-       * </pre>
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { - return state_; - } - /** - * required .hbase.pb.TableState.State state = 1; - * - *
-       * <pre>
-       * This is the table's state.
-       * </pre>
- */ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - state_ = value; - onChanged(); - return this; - } - /** - * required .hbase.pb.TableState.State state = 1; - * - *
-       * <pre>
-       * This is the table's state.
-       * </pre>
- */ - public Builder clearState() { - bitField0_ = (bitField0_ & ~0x00000001); - state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; - onChanged(); - return this; - } - - // required .hbase.pb.TableName table = 2; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableBuilder_; - /** - * required .hbase.pb.TableName table = 2; - */ - public boolean hasTable() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .hbase.pb.TableName table = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable() { - if (tableBuilder_ == null) { - return table_; - } else { - return tableBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.TableName table = 2; - */ - public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - table_ = value; - onChanged(); - } else { - tableBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * required .hbase.pb.TableName table = 2; - */ - public Builder setTable( - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { - if (tableBuilder_ == null) { - table_ = builderForValue.build(); - onChanged(); - } else { - tableBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * required .hbase.pb.TableName table = 2; - */ - public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - table_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { - table_ = - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); - } else { - table_ = value; - } - onChanged(); - } else { - tableBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * required .hbase.pb.TableName table = 2; - */ - public Builder clearTable() { - if (tableBuilder_ == null) { - table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - onChanged(); - } else { - tableBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - /** - * required .hbase.pb.TableName table = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getTableFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.TableName table = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder() { - if (tableBuilder_ != null) { - return tableBuilder_.getMessageOrBuilder(); - } else { - return table_; - } - } - /** - * required .hbase.pb.TableName table = 2; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> - getTableFieldBuilder() { - if (tableBuilder_ == null) { - tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( - table_, - getParentForChildren(), - isClean()); - table_ = null; - } - return tableBuilder_; - } - - // optional uint64 timestamp = 3; - private long timestamp_ ; - /** - * optional uint64 timestamp = 3; - */ - public boolean hasTimestamp() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional uint64 timestamp = 3; - */ - public long getTimestamp() { - return timestamp_; - } - /** - * optional uint64 timestamp = 3; - */ - public Builder setTimestamp(long value) { - bitField0_ |= 0x00000004; - timestamp_ = value; - onChanged(); - return this; - } - /** - * optional uint64 timestamp = 3; - */ - public Builder clearTimestamp() { - bitField0_ = (bitField0_ & ~0x00000004); - timestamp_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.TableState) - } - - static { - defaultInstance = new TableState(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.TableState) - } - - public interface TableDescriptorOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .hbase.pb.TableSchema schema = 1; - /** - * required .hbase.pb.TableSchema schema = 1; - */ - boolean hasSchema(); - /** - * required .hbase.pb.TableSchema schema = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema(); - /** - * required .hbase.pb.TableSchema schema = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder(); - - // optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - /** - * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - */ - boolean hasState(); - /** - * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); - } - /** - * Protobuf type {@code hbase.pb.TableDescriptor} - * - *
-   * <pre>
-   ** On HDFS representation of table state. 
-   * </pre>
- */ - public static final class TableDescriptor extends - com.google.protobuf.GeneratedMessage - implements TableDescriptorOrBuilder { - // Use TableDescriptor.newBuilder() to construct. - private TableDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private TableDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final TableDescriptor defaultInstance; - public static TableDescriptor getDefaultInstance() { - return defaultInstance; - } - - public TableDescriptor getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private TableDescriptor( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = schema_.toBuilder(); - } - schema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(schema_); - schema_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } - case 16: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(2, rawValue); - } else { - bitField0_ |= 0x00000002; - state_ = value; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public TableDescriptor parsePartialFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new TableDescriptor(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required .hbase.pb.TableSchema schema = 1; - public static final int SCHEMA_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_; - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public boolean hasSchema() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() { - return schema_; - } - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() { - return schema_; - } - - // optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - public static final int STATE_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_; - /** - * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - */ - public boolean hasState() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { - return state_; - } - - private void initFields() { - schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); - state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSchema()) { - memoizedIsInitialized = 0; - return false; - } - if (!getSchema().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, schema_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeEnum(2, state_.getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, schema_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(2, state_.getNumber()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)) { - return super.equals(obj); - } - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) obj; - - boolean result = true; - result = result && (hasSchema() == other.hasSchema()); - if (hasSchema()) { - result = result && getSchema() - .equals(other.getSchema()); - } - result = result && (hasState() == other.hasState()); - if (hasState()) { - result = result && - (getState() == other.getState()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSchema()) { - hash = (37 * hash) + SCHEMA_FIELD_NUMBER; - hash = (53 * hash) + getSchema().hashCode(); - } - if (hasState()) { - hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getState()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.TableDescriptor} - * - *
-     * <pre>
-     ** On HDFS representation of table state. 
-     * </pre>
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptorOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getSchemaFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (schemaBuilder_ == null) { - schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); - } else { - schemaBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (schemaBuilder_ == null) { - result.schema_ = schema_; - } else { - result.schema_ = schemaBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.state_ = state_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) { - return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance()) return this; - if (other.hasSchema()) { - mergeSchema(other.getSchema()); - } - if (other.hasState()) { - setState(other.getState()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSchema()) { - - return false; - } - if (!getSchema().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required .hbase.pb.TableSchema schema = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> schemaBuilder_; - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public boolean hasSchema() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() { - if (schemaBuilder_ == null) { - return schema_; - } else { - return schemaBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public Builder setSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { - if (schemaBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - schema_ = value; - onChanged(); - } else { - schemaBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public Builder setSchema( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { - if (schemaBuilder_ == null) { - schema_ = builderForValue.build(); - onChanged(); - } else { - schemaBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public Builder mergeSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { - if (schemaBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - schema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { - schema_ = - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(schema_).mergeFrom(value).buildPartial(); - } else { - schema_ = value; - } - onChanged(); - } else { - schemaBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public Builder clearSchema() { - if (schemaBuilder_ == null) { - schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); - onChanged(); - } else { - schemaBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getSchemaBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getSchemaFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.TableSchema schema = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() { - if (schemaBuilder_ != null) { - return schemaBuilder_.getMessageOrBuilder(); - } else { - return schema_; - } - } - /** - * required .hbase.pb.TableSchema schema = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> - getSchemaFieldBuilder() { - if (schemaBuilder_ == null) { - schemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( - schema_, - getParentForChildren(), - isClean()); - schema_ = null; - } - return schemaBuilder_; - } - - // optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; - /** - * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - */ - public boolean hasState() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { - return state_; - } - /** - * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - */ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - state_ = value; - onChanged(); - return this; - } - /** - * optional .hbase.pb.TableState.State state = 2 [default = ENABLED]; - */ - public Builder clearState() { - bitField0_ = (bitField0_ & ~0x00000002); - state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.TableDescriptor) - } - - static { - defaultInstance = new TableDescriptor(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.TableDescriptor) - } - public interface ColumnFamilySchemaOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -22363,16 +20793,6 @@ public Builder clearSeqNum() { 
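For reference, the messages whose generated Java is deleted above can be read back out of the descriptor string removed in the hunk below. A sketch of their HBase.proto form, reconstructed from that descriptor (field numbers, labels, and defaults as encoded there):

    // Removed by this revert (HBase.proto):
    message TableState {
      // Table's current state.
      enum State {
        ENABLED = 0;
        DISABLED = 1;
        DISABLING = 2;
        ENABLING = 3;
      }
      // This is the table's state.
      required State state = 1;
      required TableName table = 2;
      optional uint64 timestamp = 3;
    }

    // Removed by this revert: on-HDFS representation of table state.
    message TableDescriptor {
      required TableSchema schema = 1;
      optional TableState.State state = 2 [default = ENABLED];
    }

    // Also removed, from Master.proto (see the MasterProtos.java hunks
    // further below, where the empty GetClusterStatusRequest takes back
    // its descriptor slot):
    message GetTableStateRequest {
      required TableName table_name = 1;
    }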
private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_TableSchema_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_TableState_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_TableState_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_TableDescriptor_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_TableDescriptor_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_ColumnFamilySchema_descriptor; private static @@ -22513,74 +20933,67 @@ public Builder clearSeqNum() { "leName\022,\n\nattributes\030\002 \003(\0132\030.hbase.pb.By" + "tesBytesPair\0225\n\017column_families\030\003 \003(\0132\034." + "hbase.pb.ColumnFamilySchema\022/\n\rconfigura" + - "tion\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\257\001\n" + - "\nTableState\022)\n\005state\030\001 \002(\0162\032.hbase.pb.Ta" + - "bleState.State\022\"\n\005table\030\002 \002(\0132\023.hbase.pb" + - ".TableName\022\021\n\ttimestamp\030\003 \001(\004\"?\n\005State\022\013", - "\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002" + - "\022\014\n\010ENABLING\020\003\"l\n\017TableDescriptor\022%\n\006sch" + - "ema\030\001 \002(\0132\025.hbase.pb.TableSchema\0222\n\005stat" + - "e\030\002 \001(\0162\032.hbase.pb.TableState.State:\007ENA" + - "BLED\"\201\001\n\022ColumnFamilySchema\022\014\n\004name\030\001 \002(" + - "\014\022,\n\nattributes\030\002 \003(\0132\030.hbase.pb.BytesBy" + - "tesPair\022/\n\rconfiguration\030\003 \003(\0132\030.hbase.p" + - "b.NameStringPair\"\243\001\n\nRegionInfo\022\021\n\tregio" + - "n_id\030\001 \002(\004\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.p" + - "b.TableName\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_ke", - "y\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022" + - "\025\n\nreplica_id\030\007 \001(\005:\0010\":\n\014FavoredNodes\022*" + - "\n\014favored_node\030\001 \003(\0132\024.hbase.pb.ServerNa" + - "me\"\236\001\n\017RegionSpecifier\022;\n\004type\030\001 \002(\0162-.h" + - "base.pb.RegionSpecifier.RegionSpecifierT" + - "ype\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierTyp" + - "e\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAM" + - "E\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001" + - "(\004\"W\n\025ColumnFamilyTimeRange\022\025\n\rcolumn_fa" + - "mily\030\001 \002(\014\022\'\n\ntime_range\030\002 \002(\0132\023.hbase.p", - "b.TimeRange\"A\n\nServerName\022\021\n\thost_name\030\001" + - " \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033" + - "\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStrin" + - "gPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNa" + - "meBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014" + - "\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006sec" + - "ond\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t" + - "\022\r\n\005value\030\002 \001(\003\"\206\001\n\024ProcedureDescription" + - "\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n" + - "\rcreation_time\030\003 
\001(\003:\0010\022/\n\rconfiguration", - "\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\n\n\010Empt" + - "yMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDou" + - "bleMsg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimal" + - "Msg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016l" + - "east_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002" + - "(\004\"T\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014\022" + - "/\n\rconfiguration\030\002 \003(\0132\030.hbase.pb.NameSt" + - "ringPair\"\235\001\n\013VersionInfo\022\017\n\007version\030\001 \002(" + - "\t\022\013\n\003url\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user" + - "\030\004 \002(\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002", - "(\t\022\025\n\rversion_major\030\007 \001(\r\022\025\n\rversion_min" + - "or\030\010 \001(\r\"Q\n\020RegionServerInfo\022\020\n\010infoPort" + - "\030\001 \001(\005\022+\n\014version_info\030\002 \001(\0132\025.hbase.pb." + - "VersionInfo\"\243\002\n\023SnapshotDescription\022\014\n\004n" + - "ame\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_tim" + - "e\030\003 \001(\003:\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.Snap" + - "shotDescription.Type:\005FLUSH\022\017\n\007version\030\005" + - " \001(\005\022\r\n\005owner\030\006 \001(\t\022<\n\025users_and_permiss" + - "ions\030\007 \001(\0132\035.hbase.pb.UsersAndPermission" + - "s\022\016\n\003ttl\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022", - "\t\n\005FLUSH\020\001\022\r\n\tSKIPFLUSH\020\002\"9\n\nLogRequest\022" + - "\026\n\016log_class_name\030\001 \002(\t\022\023\n\013log_message\030\002" + - " \002(\014\"7\n\010LogEntry\022\026\n\016log_class_name\030\001 \002(\t" + - "\022\023\n\013log_message\030\002 \002(\014\"w\n\016RegionLocation\022" + - ")\n\013region_info\030\001 \002(\0132\024.hbase.pb.RegionIn" + - "fo\022)\n\013server_name\030\002 \001(\0132\024.hbase.pb.Serve" + - "rName\022\017\n\007seq_num\030\003 \002(\003*r\n\013CompareType\022\010\n" + - "\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n" + - "\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GR" + - "EATER\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSE", - "CONDS\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECOND" + - "S\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006" + - "\022\010\n\004DAYS\020\007B>\n*org.apache.hadoop.hbase.pr" + - "otobuf.generatedB\013HBaseProtosH\001\240\001\001" + "tion\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\201\001\n" + + "\022ColumnFamilySchema\022\014\n\004name\030\001 \002(\014\022,\n\natt" + + "ributes\030\002 \003(\0132\030.hbase.pb.BytesBytesPair\022" + + "/\n\rconfiguration\030\003 \003(\0132\030.hbase.pb.NameSt", + "ringPair\"\243\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002" + + "(\004\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableN" + + "ame\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022" + + "\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nrepli" + + "ca_id\030\007 \001(\005:\0010\":\n\014FavoredNodes\022*\n\014favore" + + "d_node\030\001 \003(\0132\024.hbase.pb.ServerName\"\236\001\n\017R" + + "egionSpecifier\022;\n\004type\030\001 \002(\0162-.hbase.pb." 
+ + "RegionSpecifier.RegionSpecifierType\022\r\n\005v" + + "alue\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017\n\013REG" + + "ION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002\"%\n\tT", + "imeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"W\n\025Co" + + "lumnFamilyTimeRange\022\025\n\rcolumn_family\030\001 \002" + + "(\014\022\'\n\ntime_range\030\002 \002(\0132\023.hbase.pb.TimeRa" + + "nge\"A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004" + + "port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coproc" + + "essor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n" + + "\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesP" + + "air\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016Byte" + + "sBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(" + + "\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005valu", + "e\030\002 \001(\003\"\206\001\n\024ProcedureDescription\022\021\n\tsign" + + "ature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcreatio" + + "n_time\030\003 \001(\003:\0010\022/\n\rconfiguration\030\004 \003(\0132\030" + + ".hbase.pb.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007" + + "LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022" + + "\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016b" + + "igdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig" + + "_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"T\n\023Na" + + "mespaceDescriptor\022\014\n\004name\030\001 \002(\014\022/\n\rconfi" + + "guration\030\002 \003(\0132\030.hbase.pb.NameStringPair", + "\"\235\001\n\013VersionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003url" + + "\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(\t\022\014" + + "\n\004date\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002(\t\022\025\n\rve" + + "rsion_major\030\007 \001(\r\022\025\n\rversion_minor\030\010 \001(\r" + + "\"Q\n\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+" + + "\n\014version_info\030\002 \001(\0132\025.hbase.pb.VersionI" + + "nfo\"\243\002\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" + + "\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:" + + "\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.SnapshotDesc" + + "ription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n\005", + "owner\030\006 \001(\t\022<\n\025users_and_permissions\030\007 \001" + + "(\0132\035.hbase.pb.UsersAndPermissions\022\016\n\003ttl" + + "\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH" + + "\020\001\022\r\n\tSKIPFLUSH\020\002\"9\n\nLogRequest\022\026\n\016log_c" + + "lass_name\030\001 \002(\t\022\023\n\013log_message\030\002 \002(\014\"7\n\010" + + "LogEntry\022\026\n\016log_class_name\030\001 \002(\t\022\023\n\013log_" + + "message\030\002 \002(\014\"w\n\016RegionLocation\022)\n\013regio" + + "n_info\030\001 \002(\0132\024.hbase.pb.RegionInfo\022)\n\013se" + + "rver_name\030\002 \001(\0132\024.hbase.pb.ServerName\022\017\n" + + "\007seq_num\030\003 \002(\003*r\n\013CompareType\022\010\n\004LESS\020\000\022", + "\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQU" + + "AL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022" + + 
"\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS\020\001\022" + + "\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007S" + + "ECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS" + + "\020\007B>\n*org.apache.hadoop.hbase.protobuf.g" + + "eneratedB\013HBaseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -22593,164 +21006,152 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TableSchema_descriptor, new java.lang.String[] { "TableName", "Attributes", "ColumnFamilies", "Configuration", }); - internal_static_hbase_pb_TableState_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_hbase_pb_TableState_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_TableState_descriptor, - new java.lang.String[] { "State", "Table", "Timestamp", }); - internal_static_hbase_pb_TableDescriptor_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_hbase_pb_TableDescriptor_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_TableDescriptor_descriptor, - new java.lang.String[] { "Schema", "State", }); internal_static_hbase_pb_ColumnFamilySchema_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(1); internal_static_hbase_pb_ColumnFamilySchema_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ColumnFamilySchema_descriptor, new java.lang.String[] { "Name", "Attributes", "Configuration", }); internal_static_hbase_pb_RegionInfo_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(2); internal_static_hbase_pb_RegionInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RegionInfo_descriptor, new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", "ReplicaId", }); internal_static_hbase_pb_FavoredNodes_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(3); internal_static_hbase_pb_FavoredNodes_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_FavoredNodes_descriptor, new java.lang.String[] { "FavoredNode", }); internal_static_hbase_pb_RegionSpecifier_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_RegionSpecifier_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RegionSpecifier_descriptor, new java.lang.String[] { "Type", "Value", }); internal_static_hbase_pb_TimeRange_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(5); internal_static_hbase_pb_TimeRange_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TimeRange_descriptor, new java.lang.String[] { "From", "To", }); internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(6); 
internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor, new java.lang.String[] { "ColumnFamily", "TimeRange", }); internal_static_hbase_pb_ServerName_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(7); internal_static_hbase_pb_ServerName_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ServerName_descriptor, new java.lang.String[] { "HostName", "Port", "StartCode", }); internal_static_hbase_pb_Coprocessor_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(8); internal_static_hbase_pb_Coprocessor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_Coprocessor_descriptor, new java.lang.String[] { "Name", }); internal_static_hbase_pb_NameStringPair_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(9); internal_static_hbase_pb_NameStringPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NameStringPair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_hbase_pb_NameBytesPair_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(10); internal_static_hbase_pb_NameBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NameBytesPair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_hbase_pb_BytesBytesPair_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(11); internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BytesBytesPair_descriptor, new java.lang.String[] { "First", "Second", }); internal_static_hbase_pb_NameInt64Pair_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(12); internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NameInt64Pair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_hbase_pb_ProcedureDescription_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(13); internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ProcedureDescription_descriptor, new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", }); internal_static_hbase_pb_EmptyMsg_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(14); internal_static_hbase_pb_EmptyMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_EmptyMsg_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_LongMsg_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_LongMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_LongMsg_descriptor, new java.lang.String[] { "LongMsg", }); internal_static_hbase_pb_DoubleMsg_descriptor = - 
getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(16); internal_static_hbase_pb_DoubleMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_DoubleMsg_descriptor, new java.lang.String[] { "DoubleMsg", }); internal_static_hbase_pb_BigDecimalMsg_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(17); internal_static_hbase_pb_BigDecimalMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BigDecimalMsg_descriptor, new java.lang.String[] { "BigdecimalMsg", }); internal_static_hbase_pb_UUID_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(18); internal_static_hbase_pb_UUID_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_UUID_descriptor, new java.lang.String[] { "LeastSigBits", "MostSigBits", }); internal_static_hbase_pb_NamespaceDescriptor_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(19); internal_static_hbase_pb_NamespaceDescriptor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NamespaceDescriptor_descriptor, new java.lang.String[] { "Name", "Configuration", }); internal_static_hbase_pb_VersionInfo_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(20); internal_static_hbase_pb_VersionInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_VersionInfo_descriptor, new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", "VersionMajor", "VersionMinor", }); internal_static_hbase_pb_RegionServerInfo_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(21); internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RegionServerInfo_descriptor, new java.lang.String[] { "InfoPort", "VersionInfo", }); internal_static_hbase_pb_SnapshotDescription_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(22); internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SnapshotDescription_descriptor, new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", "UsersAndPermissions", "Ttl", }); internal_static_hbase_pb_LogRequest_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(23); internal_static_hbase_pb_LogRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_LogRequest_descriptor, new java.lang.String[] { "LogClassName", "LogMessage", }); internal_static_hbase_pb_LogEntry_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_LogEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_LogEntry_descriptor, new java.lang.String[] { "LogClassName", "LogMessage", }); internal_static_hbase_pb_RegionLocation_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(25); 
internal_static_hbase_pb_RegionLocation_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RegionLocation_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 9fa92e1762dd..e165fcd1d620 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -49724,42 +49724,28 @@ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder // @@protoc_insertion_point(class_scope:hbase.pb.GetTableNamesResponse) } - public interface GetTableStateRequestOrBuilder + public interface GetClusterStatusRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - - // required .hbase.pb.TableName table_name = 1; - /** - * required .hbase.pb.TableName table_name = 1; - */ - boolean hasTableName(); - /** - * required .hbase.pb.TableName table_name = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); - /** - * required .hbase.pb.TableName table_name = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); } /** - * Protobuf type {@code hbase.pb.GetTableStateRequest} + * Protobuf type {@code hbase.pb.GetClusterStatusRequest} */ - public static final class GetTableStateRequest extends + public static final class GetClusterStatusRequest extends com.google.protobuf.GeneratedMessage - implements GetTableStateRequestOrBuilder { - // Use GetTableStateRequest.newBuilder() to construct. - private GetTableStateRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetClusterStatusRequestOrBuilder { + // Use GetClusterStatusRequest.newBuilder() to construct. 
+ private GetClusterStatusRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetTableStateRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetClusterStatusRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetTableStateRequest defaultInstance; - public static GetTableStateRequest getDefaultInstance() { + private static final GetClusterStatusRequest defaultInstance; + public static GetClusterStatusRequest getDefaultInstance() { return defaultInstance; } - public GetTableStateRequest getDefaultInstanceForType() { + public GetClusterStatusRequest getDefaultInstanceForType() { return defaultInstance; } @@ -49769,12 +49755,11 @@ public GetTableStateRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetTableStateRequest( + private GetClusterStatusRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -49792,19 +49777,6 @@ private GetTableStateRequest( } break; } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = tableName_.toBuilder(); - } - tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(tableName_); - tableName_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -49819,70 +49791,38 @@ private GetTableStateRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetTableStateRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetClusterStatusRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { - return new GetTableStateRequest(input, extensionRegistry); + return new GetClusterStatusRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required .hbase.pb.TableName table_name = 1; - public static final int TABLE_NAME_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; - /** - * required .hbase.pb.TableName table_name = 1; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - return tableName_; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - return tableName_; - } - private void initFields() { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasTableName()) { - memoizedIsInitialized = 0; - return false; - } - if (!getTableName().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -49890,9 +49830,6 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, tableName_); - } getUnknownFields().writeTo(output); } @@ -49902,10 +49839,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, tableName_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -49923,17 +49856,12 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) obj; boolean result = true; - result = result && (hasTableName() == other.hasTableName()); - if (hasTableName()) { - result = result && getTableName() - .equals(other.getTableName()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -49947,62 +49875,58 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTableName()) { - hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getTableName().hashCode(); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { @@ -50011,7 +49935,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableSt public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -50023,24 +49947,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetTableStateRequest} + * Protobuf type {@code hbase.pb.GetClusterStatusRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -50052,7 +49976,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableNameFieldBuilder(); } } private static Builder create() { @@ -50061,12 +49984,6 @@ private static Builder create() { public Builder clear() { super.clear(); - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - } else { - tableNameBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -50076,65 +49993,43 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest 
getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (tableNameBuilder_ == null) { - result.tableName_ = tableName_; - } else { - result.tableName_ = tableNameBuilder_.build(); - } - result.bitField0_ = to_bitField0_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance()) return this; - if (other.hasTableName()) { - mergeTableName(other.getTableName()); - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasTableName()) { - - return false; - } - if (!getTableName().isInitialized()) { - - return false; - } return true; } @@ -50142,11 +50037,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parsedMessage = null; try { parsedMessage = 
PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -50155,172 +50050,54 @@ public Builder mergeFrom( } return this; } - private int bitField0_; - // required .hbase.pb.TableName table_name = 1; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; - /** - * required .hbase.pb.TableName table_name = 1; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - if (tableNameBuilder_ == null) { - return tableName_; - } else { - return tableNameBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableNameBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - tableName_ = value; - onChanged(); - } else { - tableNameBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public Builder setTableName( - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { - if (tableNameBuilder_ == null) { - tableName_ = builderForValue.build(); - onChanged(); - } else { - tableNameBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableNameBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { - tableName_ = - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); - } else { - tableName_ = value; - } - onChanged(); - } else { - tableNameBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public Builder clearTableName() { - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - onChanged(); - } else { - tableNameBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getTableNameFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.TableName table_name = 1; - 
*/ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - if (tableNameBuilder_ != null) { - return tableNameBuilder_.getMessageOrBuilder(); - } else { - return tableName_; - } - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> - getTableNameFieldBuilder() { - if (tableNameBuilder_ == null) { - tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( - tableName_, - getParentForChildren(), - isClean()); - tableName_ = null; - } - return tableNameBuilder_; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.GetTableStateRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterStatusRequest) } static { - defaultInstance = new GetTableStateRequest(true); + defaultInstance = new GetClusterStatusRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetTableStateRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterStatusRequest) } - public interface GetTableStateResponseOrBuilder + public interface GetClusterStatusResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.TableState table_state = 1; + // required .hbase.pb.ClusterStatus cluster_status = 1; /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - boolean hasTableState(); + boolean hasClusterStatus(); /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState(); + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus(); /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder(); + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder(); } /** - * Protobuf type {@code hbase.pb.GetTableStateResponse} + * Protobuf type {@code hbase.pb.GetClusterStatusResponse} */ - public static final class GetTableStateResponse extends + public static final class GetClusterStatusResponse extends com.google.protobuf.GeneratedMessage - implements GetTableStateResponseOrBuilder { - // Use GetTableStateResponse.newBuilder() to construct. - private GetTableStateResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetClusterStatusResponseOrBuilder { + // Use GetClusterStatusResponse.newBuilder() to construct. 
+ private GetClusterStatusResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetTableStateResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetClusterStatusResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetTableStateResponse defaultInstance; - public static GetTableStateResponse getDefaultInstance() { + private static final GetClusterStatusResponse defaultInstance; + public static GetClusterStatusResponse getDefaultInstance() { return defaultInstance; } - public GetTableStateResponse getDefaultInstanceForType() { + public GetClusterStatusResponse getDefaultInstanceForType() { return defaultInstance; } @@ -50330,7 +50107,7 @@ public GetTableStateResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetTableStateResponse( + private GetClusterStatusResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -50354,14 +50131,14 @@ private GetTableStateResponse( break; } case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder subBuilder = null; + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = tableState_.toBuilder(); + subBuilder = clusterStatus_.toBuilder(); } - tableState_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.PARSER, extensionRegistry); + clusterStatus_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.PARSER, extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom(tableState_); - tableState_ = subBuilder.buildPartial(); + subBuilder.mergeFrom(clusterStatus_); + clusterStatus_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; @@ -50380,67 +50157,67 @@ private GetTableStateResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetTableStateResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public 
GetClusterStatusResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetTableStateResponse(input, extensionRegistry); + return new GetClusterStatusResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required .hbase.pb.TableState table_state = 1; - public static final int TABLE_STATE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_; + // required .hbase.pb.ClusterStatus cluster_status = 1; + public static final int CLUSTER_STATUS_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus clusterStatus_; /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public boolean hasTableState() { + public boolean hasClusterStatus() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() { - return tableState_; + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus() { + return clusterStatus_; } /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() { - return tableState_; + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder() { + return clusterStatus_; } private void initFields() { - tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasTableState()) { + if (!hasClusterStatus()) { memoizedIsInitialized = 0; return false; } - if (!getTableState().isInitialized()) { + if (!getClusterStatus().isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -50452,7 +50229,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, tableState_); + output.writeMessage(1, clusterStatus_); } getUnknownFields().writeTo(output); } @@ -50465,7 +50242,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, tableState_); + .computeMessageSize(1, clusterStatus_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -50484,16 +50261,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse)) { 
return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) obj; boolean result = true; - result = result && (hasTableState() == other.hasTableState()); - if (hasTableState()) { - result = result && getTableState() - .equals(other.getTableState()); + result = result && (hasClusterStatus() == other.hasClusterStatus()); + if (hasClusterStatus()) { + result = result && getClusterStatus() + .equals(other.getClusterStatus()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -50508,62 +50285,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTableState()) { - hash = (37 * hash) + TABLE_STATE_FIELD_NUMBER; - hash = (53 * hash) + getTableState().hashCode(); + if (hasClusterStatus()) { + hash = (37 * hash) + CLUSTER_STATUS_FIELD_NUMBER; + hash = (53 * hash) + getClusterStatus().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -50572,7 +50349,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableSt public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -50584,24 +50361,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetTableStateResponse} + * Protobuf type {@code hbase.pb.GetClusterStatusResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -50613,7 +50390,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableStateFieldBuilder(); + getClusterStatusFieldBuilder(); } } private static Builder create() { @@ -50622,10 +50399,10 @@ private static Builder create() { public Builder clear() { super.clear(); - if (tableStateBuilder_ == null) { - tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + if (clusterStatusBuilder_ == null) { + clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); } else { - tableStateBuilder_.clear(); + clusterStatusBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; @@ -50637,32 +50414,32 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (tableStateBuilder_ == null) { - result.tableState_ = tableState_; + if (clusterStatusBuilder_ == null) { + result.clusterStatus_ = clusterStatus_; } else { - result.tableState_ = tableStateBuilder_.build(); + result.clusterStatus_ = clusterStatusBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); @@ 
-50670,29 +50447,29 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResp } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()) return this; - if (other.hasTableState()) { - mergeTableState(other.getTableState()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.getDefaultInstance()) return this; + if (other.hasClusterStatus()) { + mergeClusterStatus(other.getClusterStatus()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasTableState()) { + if (!hasClusterStatus()) { return false; } - if (!getTableState().isInitialized()) { + if (!getClusterStatus().isInitialized()) { return false; } @@ -50703,11 +50480,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -50718,156 +50495,156 @@ public Builder mergeFrom( } private int bitField0_; - // required .hbase.pb.TableState table_state = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + // required .hbase.pb.ClusterStatus cluster_status = 1; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> tableStateBuilder_; + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder> clusterStatusBuilder_; /** - * required 
.hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public boolean hasTableState() { + public boolean hasClusterStatus() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() { - if (tableStateBuilder_ == null) { - return tableState_; + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus() { + if (clusterStatusBuilder_ == null) { + return clusterStatus_; } else { - return tableStateBuilder_.getMessage(); + return clusterStatusBuilder_.getMessage(); } } /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public Builder setTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) { - if (tableStateBuilder_ == null) { + public Builder setClusterStatus(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus value) { + if (clusterStatusBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - tableState_ = value; + clusterStatus_ = value; onChanged(); } else { - tableStateBuilder_.setMessage(value); + clusterStatusBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public Builder setTableState( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder builderForValue) { - if (tableStateBuilder_ == null) { - tableState_ = builderForValue.build(); + public Builder setClusterStatus( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder builderForValue) { + if (clusterStatusBuilder_ == null) { + clusterStatus_ = builderForValue.build(); onChanged(); } else { - tableStateBuilder_.setMessage(builderForValue.build()); + clusterStatusBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public Builder mergeTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) { - if (tableStateBuilder_ == null) { + public Builder mergeClusterStatus(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus value) { + if (clusterStatusBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && - tableState_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) { - tableState_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder(tableState_).mergeFrom(value).buildPartial(); + clusterStatus_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance()) { + clusterStatus_ = + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.newBuilder(clusterStatus_).mergeFrom(value).buildPartial(); } else { - tableState_ = value; + clusterStatus_ = value; } onChanged(); } else { - tableStateBuilder_.mergeFrom(value); + clusterStatusBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public Builder clearTableState() { - if (tableStateBuilder_ == null) { - 
tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + public Builder clearClusterStatus() { + if (clusterStatusBuilder_ == null) { + clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); onChanged(); } else { - tableStateBuilder_.clear(); + clusterStatusBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder getTableStateBuilder() { + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder getClusterStatusBuilder() { bitField0_ |= 0x00000001; onChanged(); - return getTableStateFieldBuilder().getBuilder(); + return getClusterStatusFieldBuilder().getBuilder(); } /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() { - if (tableStateBuilder_ != null) { - return tableStateBuilder_.getMessageOrBuilder(); + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder() { + if (clusterStatusBuilder_ != null) { + return clusterStatusBuilder_.getMessageOrBuilder(); } else { - return tableState_; + return clusterStatus_; } } /** - * required .hbase.pb.TableState table_state = 1; + * required .hbase.pb.ClusterStatus cluster_status = 1; */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> - getTableStateFieldBuilder() { - if (tableStateBuilder_ == null) { - tableStateBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder>( - tableState_, + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder> + getClusterStatusFieldBuilder() { + if (clusterStatusBuilder_ == null) { + clusterStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder>( + clusterStatus_, getParentForChildren(), isClean()); - tableState_ = null; + clusterStatus_ = null; } - return tableStateBuilder_; + return clusterStatusBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetTableStateResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterStatusResponse) } static { - defaultInstance = new GetTableStateResponse(true); + defaultInstance = new GetClusterStatusResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetTableStateResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterStatusResponse) } 
- public interface GetClusterStatusRequestOrBuilder + public interface IsMasterRunningRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** - * Protobuf type {@code hbase.pb.GetClusterStatusRequest} + * Protobuf type {@code hbase.pb.IsMasterRunningRequest} */ - public static final class GetClusterStatusRequest extends + public static final class IsMasterRunningRequest extends com.google.protobuf.GeneratedMessage - implements GetClusterStatusRequestOrBuilder { - // Use GetClusterStatusRequest.newBuilder() to construct. - private GetClusterStatusRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsMasterRunningRequestOrBuilder { + // Use IsMasterRunningRequest.newBuilder() to construct. + private IsMasterRunningRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetClusterStatusRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsMasterRunningRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetClusterStatusRequest defaultInstance; - public static GetClusterStatusRequest getDefaultInstance() { + private static final IsMasterRunningRequest defaultInstance; + public static IsMasterRunningRequest getDefaultInstance() { return defaultInstance; } - public GetClusterStatusRequest getDefaultInstanceForType() { + public IsMasterRunningRequest getDefaultInstanceForType() { return defaultInstance; } @@ -50877,7 +50654,7 @@ public GetClusterStatusRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetClusterStatusRequest( + private IsMasterRunningRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -50913,28 +50690,28 @@ private GetClusterStatusRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetClusterStatusRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsMasterRunningRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { - return new GetClusterStatusRequest(input, extensionRegistry); + return new IsMasterRunningRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @@ -50978,10 +50755,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) obj; boolean result = true; result = result && @@ -51002,53 +50779,53 @@ public int hashCode() { return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseDelimitedFrom(java.io.InputStream input) + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -51057,7 +50834,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCluster public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -51069,24 +50846,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetClusterStatusRequest} + * Protobuf type {@code hbase.pb.IsMasterRunningRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.Builder.class); } - // Construct using 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -51115,38 +50892,38 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -51159,11 +50936,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -51173,53 +50950,49 @@ public Builder mergeFrom( return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterStatusRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsMasterRunningRequest) } static { - defaultInstance = new GetClusterStatusRequest(true); + defaultInstance = new IsMasterRunningRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterStatusRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.IsMasterRunningRequest) } - public interface GetClusterStatusResponseOrBuilder + public interface IsMasterRunningResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.ClusterStatus cluster_status = 1; - /** - * required .hbase.pb.ClusterStatus cluster_status = 1; - */ - boolean hasClusterStatus(); + // required bool is_master_running = 1; /** - * required .hbase.pb.ClusterStatus cluster_status = 1; + * required bool is_master_running = 1; */ - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus(); + boolean hasIsMasterRunning(); /** - * required .hbase.pb.ClusterStatus cluster_status = 1; + * required bool is_master_running = 1; */ - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder(); + boolean getIsMasterRunning(); } /** - * Protobuf type {@code hbase.pb.GetClusterStatusResponse} + * Protobuf type {@code hbase.pb.IsMasterRunningResponse} */ - public static final class GetClusterStatusResponse extends + public static final class IsMasterRunningResponse extends com.google.protobuf.GeneratedMessage - implements GetClusterStatusResponseOrBuilder { - // Use GetClusterStatusResponse.newBuilder() to construct. - private GetClusterStatusResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + implements IsMasterRunningResponseOrBuilder { + // Use IsMasterRunningResponse.newBuilder() to construct.
+ private IsMasterRunningResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetClusterStatusResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsMasterRunningResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetClusterStatusResponse defaultInstance; - public static GetClusterStatusResponse getDefaultInstance() { + private static final IsMasterRunningResponse defaultInstance; + public static IsMasterRunningResponse getDefaultInstance() { return defaultInstance; } - public GetClusterStatusResponse getDefaultInstanceForType() { + public IsMasterRunningResponse getDefaultInstanceForType() { return defaultInstance; } @@ -51229,7 +51002,7 @@ public GetClusterStatusResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetClusterStatusResponse( + private IsMasterRunningResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -51252,17 +51025,9 @@ private GetClusterStatusResponse( } break; } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = clusterStatus_.toBuilder(); - } - clusterStatus_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(clusterStatus_); - clusterStatus_ = subBuilder.buildPartial(); - } + case 8: { bitField0_ |= 0x00000001; + isMasterRunning_ = input.readBool(); break; } } @@ -51279,67 +51044,57 @@ private GetClusterStatusResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.Builder.class); } - public static com.google.protobuf.Parser<GetClusterStatusResponse> PARSER = - new com.google.protobuf.AbstractParser<GetClusterStatusResponse>() { - public GetClusterStatusResponse parsePartialFrom( + public static com.google.protobuf.Parser<IsMasterRunningResponse> PARSER = + new com.google.protobuf.AbstractParser<IsMasterRunningResponse>() { + public IsMasterRunningResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetClusterStatusResponse(input,
extensionRegistry); + return new IsMasterRunningResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser<GetClusterStatusResponse> getParserForType() { + public com.google.protobuf.Parser<IsMasterRunningResponse> getParserForType() { return PARSER; } private int bitField0_; - // required .hbase.pb.ClusterStatus cluster_status = 1; - public static final int CLUSTER_STATUS_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus clusterStatus_; + // required bool is_master_running = 1; + public static final int IS_MASTER_RUNNING_FIELD_NUMBER = 1; + private boolean isMasterRunning_; /** - * required .hbase.pb.ClusterStatus cluster_status = 1; + * required bool is_master_running = 1; */ - public boolean hasClusterStatus() { + public boolean hasIsMasterRunning() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.ClusterStatus cluster_status = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus() { - return clusterStatus_; - } - /** - * required .hbase.pb.ClusterStatus cluster_status = 1; + * required bool is_master_running = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder() { - return clusterStatus_; + public boolean getIsMasterRunning() { + return isMasterRunning_; } private void initFields() { - clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); + isMasterRunning_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasClusterStatus()) { - memoizedIsInitialized = 0; - return false; - } - if (!getClusterStatus().isInitialized()) { + if (!hasIsMasterRunning()) { memoizedIsInitialized = 0; return false; } @@ -51351,7 +51106,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, clusterStatus_); + output.writeBool(1, isMasterRunning_); } getUnknownFields().writeTo(output); } @@ -51364,7 +51119,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, clusterStatus_); + .computeBoolSize(1, isMasterRunning_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -51383,16 +51138,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) obj; boolean result = true; - result = result && (hasClusterStatus() == other.hasClusterStatus()); - if (hasClusterStatus()) { - result = result && getClusterStatus() - .equals(other.getClusterStatus()); + result = result && (hasIsMasterRunning() ==
other.hasIsMasterRunning()); + if (hasIsMasterRunning()) { + result = result && (getIsMasterRunning() + == other.getIsMasterRunning()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -51407,62 +51162,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasClusterStatus()) { - hash = (37 * hash) + CLUSTER_STATUS_FIELD_NUMBER; - hash = (53 * hash) + getClusterStatus().hashCode(); + if (hasIsMasterRunning()) { + hash = (37 * hash) + IS_MASTER_RUNNING_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsMasterRunning()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseDelimitedFrom( java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -51471,7 +51226,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCluster public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -51483,24 +51238,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetClusterStatusResponse} + * Protobuf type {@code hbase.pb.IsMasterRunningResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -51512,7 +51267,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getClusterStatusFieldBuilder(); } } private static Builder create() { @@ -51521,11
+51275,7 @@ private static Builder create() { public Builder clear() { super.clear(); - if (clusterStatusBuilder_ == null) { - clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); - } else { - clusterStatusBuilder_.clear(); - } + isMasterRunning_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -51536,62 +51286,54 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterStatusResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (clusterStatusBuilder_ == null) { - result.clusterStatus_ = clusterStatus_; - } else { - result.clusterStatus_ = clusterStatusBuilder_.build(); - } + result.isMasterRunning_ = isMasterRunning_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.getDefaultInstance()) return this; - if (other.hasClusterStatus()) { - 
mergeClusterStatus(other.getClusterStatus()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()) return this; + if (other.hasIsMasterRunning()) { + setIsMasterRunning(other.getIsMasterRunning()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasClusterStatus()) { - - return false; - } - if (!getClusterStatus().isInitialized()) { + if (!hasIsMasterRunning()) { return false; } @@ -51602,11 +51344,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -51617,156 +51359,86 @@ public Builder mergeFrom( } private int bitField0_; - // required .hbase.pb.ClusterStatus cluster_status = 1; - private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder> clusterStatusBuilder_; + // required bool is_master_running = 1; + private boolean isMasterRunning_ ; /** - * required .hbase.pb.ClusterStatus cluster_status = 1; + * required bool is_master_running = 1; */ - public boolean hasClusterStatus() { + public boolean hasIsMasterRunning() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.ClusterStatus cluster_status = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getClusterStatus() { - if (clusterStatusBuilder_ == null) { - return clusterStatus_; - } else { - return clusterStatusBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.ClusterStatus cluster_status = 1; - */ - public Builder setClusterStatus(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus value) { - if (clusterStatusBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - clusterStatus_ = value; - onChanged(); - } else { - clusterStatusBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.ClusterStatus cluster_status = 1; + * required bool is_master_running = 1; */ - public Builder setClusterStatus( - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder builderForValue) { - if (clusterStatusBuilder_ == null) { - clusterStatus_ = builderForValue.build(); - onChanged(); - } else { - 
clusterStatusBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; + public boolean getIsMasterRunning() { + return isMasterRunning_; } /** - * required .hbase.pb.ClusterStatus cluster_status = 1; + * required bool is_master_running = 1; */ - public Builder mergeClusterStatus(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus value) { - if (clusterStatusBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - clusterStatus_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance()) { - clusterStatus_ = - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.newBuilder(clusterStatus_).mergeFrom(value).buildPartial(); - } else { - clusterStatus_ = value; - } - onChanged(); - } else { - clusterStatusBuilder_.mergeFrom(value); - } + public Builder setIsMasterRunning(boolean value) { bitField0_ |= 0x00000001; + isMasterRunning_ = value; + onChanged(); return this; } /** - * required .hbase.pb.ClusterStatus cluster_status = 1; + * required bool is_master_running = 1; */ - public Builder clearClusterStatus() { - if (clusterStatusBuilder_ == null) { - clusterStatus_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance(); - onChanged(); - } else { - clusterStatusBuilder_.clear(); - } + public Builder clearIsMasterRunning() { bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * required .hbase.pb.ClusterStatus cluster_status = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder getClusterStatusBuilder() { - bitField0_ |= 0x00000001; + isMasterRunning_ = false; onChanged(); - return getClusterStatusFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.ClusterStatus cluster_status = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder getClusterStatusOrBuilder() { - if (clusterStatusBuilder_ != null) { - return clusterStatusBuilder_.getMessageOrBuilder(); - } else { - return clusterStatus_; - } - } - /** - * required .hbase.pb.ClusterStatus cluster_status = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder> - getClusterStatusFieldBuilder() { - if (clusterStatusBuilder_ == null) { - clusterStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder>( - clusterStatus_, - getParentForChildren(), - isClean()); - clusterStatus_ = null; - } - return clusterStatusBuilder_; + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterStatusResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsMasterRunningResponse) } static { - defaultInstance = new GetClusterStatusResponse(true); + defaultInstance = new IsMasterRunningResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterStatusResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.IsMasterRunningResponse) } - public interface 
IsMasterRunningRequestOrBuilder + public interface ExecProcedureRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.ProcedureDescription procedure = 1; + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + boolean hasProcedure(); + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure(); + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder(); } /** - * Protobuf type {@code hbase.pb.IsMasterRunningRequest} + * Protobuf type {@code hbase.pb.ExecProcedureRequest} */ - public static final class IsMasterRunningRequest extends + public static final class ExecProcedureRequest extends com.google.protobuf.GeneratedMessage - implements IsMasterRunningRequestOrBuilder { - // Use IsMasterRunningRequest.newBuilder() to construct. - private IsMasterRunningRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + implements ExecProcedureRequestOrBuilder { + // Use ExecProcedureRequest.newBuilder() to construct. + private ExecProcedureRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private IsMasterRunningRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ExecProcedureRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final IsMasterRunningRequest defaultInstance; - public static IsMasterRunningRequest getDefaultInstance() { + private static final ExecProcedureRequest defaultInstance; + public static ExecProcedureRequest getDefaultInstance() { return defaultInstance; } - public IsMasterRunningRequest getDefaultInstanceForType() { + public ExecProcedureRequest getDefaultInstanceForType() { return defaultInstance; } @@ -51776,11 +51448,12 @@ public IsMasterRunningRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private IsMasterRunningRequest( + private ExecProcedureRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -51798,6 +51471,19 @@ private IsMasterRunningRequest( } break; } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = procedure_.toBuilder(); + } + procedure_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(procedure_); + procedure_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -51812,38 +51498,70 @@ private IsMasterRunningRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor; + return
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.Builder.class); } - public static com.google.protobuf.Parser<IsMasterRunningRequest> PARSER = - new com.google.protobuf.AbstractParser<IsMasterRunningRequest>() { - public IsMasterRunningRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) + public static com.google.protobuf.Parser<ExecProcedureRequest> PARSER = + new com.google.protobuf.AbstractParser<ExecProcedureRequest>() { + public ExecProcedureRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsMasterRunningRequest(input, extensionRegistry); + return new ExecProcedureRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser<IsMasterRunningRequest> getParserForType() { + public com.google.protobuf.Parser<ExecProcedureRequest> getParserForType() { return PARSER; } + private int bitField0_; + // required .hbase.pb.ProcedureDescription procedure = 1; + public static final int PROCEDURE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_; + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public boolean hasProcedure() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { + return procedure_; + } + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { + return procedure_; + } + private void initFields() { + procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasProcedure()) { + memoizedIsInitialized = 0; + return false; + } + if (!getProcedure().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -51851,6 +51569,9 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, procedure_); + } getUnknownFields().writeTo(output); } @@ -51860,6 +51581,10 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size +=
com.google.protobuf.CodedOutputStream + .computeMessageSize(1, procedure_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -51877,12 +51602,17 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) obj; boolean result = true; + result = result && (hasProcedure() == other.hasProcedure()); + if (hasProcedure()) { + result = result && getProcedure() + .equals(other.getProcedure()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -51896,58 +51626,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcedure()) { + hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; + hash = (53 * hash) + getProcedure().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -51956,7 +51690,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRu public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -51968,24 +51702,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsMasterRunningRequest} + * Protobuf type {@code hbase.pb.ExecProcedureRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( -
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -51997,6 +51731,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getProcedureFieldBuilder(); } } private static Builder create() { @@ -52005,6 +51740,12 @@ private static Builder create() { public Builder clear() { super.clear(); + if (procedureBuilder_ == null) { + procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + } else { + procedureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -52014,43 +51755,65 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (procedureBuilder_ == null) { + result.procedure_ = procedure_; + } else { + result.procedure_ = procedureBuilder_.build(); + } + result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) { - return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance()) return this; + if (other.hasProcedure()) { + mergeProcedure(other.getProcedure()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasProcedure()) { + + return false; + } + if (!getProcedure().isInitialized()) { + + return false; + } return true; } @@ -52058,11 +51821,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -52071,155 +51834,301 @@ public Builder mergeFrom( } return this; } + private int bitField0_; - // @@protoc_insertion_point(builder_scope:hbase.pb.IsMasterRunningRequest) - } - - static { - defaultInstance = new IsMasterRunningRequest(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.IsMasterRunningRequest) - } - - public interface IsMasterRunningResponseOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bool is_master_running = 1; - /** - * required bool is_master_running = 1; - */ - boolean hasIsMasterRunning(); - /** - * required bool is_master_running = 1; - */ - boolean getIsMasterRunning(); - } - /** - * Protobuf type {@code hbase.pb.IsMasterRunningResponse} - */ - public static final class IsMasterRunningResponse extends - com.google.protobuf.GeneratedMessage - implements IsMasterRunningResponseOrBuilder { - // Use IsMasterRunningResponse.newBuilder() to construct. 
- private IsMasterRunningResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private IsMasterRunningResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final IsMasterRunningResponse defaultInstance; - public static IsMasterRunningResponse getDefaultInstance() { - return defaultInstance; - } - - public IsMasterRunningResponse getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private IsMasterRunningResponse( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - isMasterRunning_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); + // required .hbase.pb.ProcedureDescription procedure = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> procedureBuilder_; + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public boolean hasProcedure() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; - } - + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { + if (procedureBuilder_ == null) { + return procedure_; + } else { + return procedureBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public Builder setProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + procedure_ = value; + onChanged(); + } else { + procedureBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public
Builder setProcedure( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) { + if (procedureBuilder_ == null) { + procedure_ = builderForValue.build(); + onChanged(); + } else { + procedureBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public Builder mergeProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { + if (procedureBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + procedure_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) { + procedure_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder(procedure_).mergeFrom(value).buildPartial(); + } else { + procedure_ = value; + } + onChanged(); + } else { + procedureBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public Builder clearProcedure() { + if (procedureBuilder_ == null) { + procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + onChanged(); + } else { + procedureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getProcedureBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getProcedureFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { + if (procedureBuilder_ != null) { + return procedureBuilder_.getMessageOrBuilder(); + } else { + return procedure_; + } + } + /** + * required .hbase.pb.ProcedureDescription procedure = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> + getProcedureFieldBuilder() { + if (procedureBuilder_ == null) { + procedureBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>( + procedure_, + getParentForChildren(), + isClean()); + procedure_ = null; + } + return procedureBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ExecProcedureRequest) + } + + static { + defaultInstance = new ExecProcedureRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ExecProcedureRequest) + } + + public interface ExecProcedureResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 expected_timeout = 1; + /** + * optional int64 expected_timeout = 1; + */ + boolean hasExpectedTimeout(); + /** + * optional int64 expected_timeout = 1; + */ + long getExpectedTimeout(); + + // optional bytes return_data = 2; + /** + * optional bytes return_data = 2; + */ + boolean hasReturnData(); + /** 
+ * optional bytes return_data = 2; + */ + com.google.protobuf.ByteString getReturnData(); + } + /** + * Protobuf type {@code hbase.pb.ExecProcedureResponse} + */ + public static final class ExecProcedureResponse extends + com.google.protobuf.GeneratedMessage + implements ExecProcedureResponseOrBuilder { + // Use ExecProcedureResponse.newBuilder() to construct. + private ExecProcedureResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ExecProcedureResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ExecProcedureResponse defaultInstance; + public static ExecProcedureResponse getDefaultInstance() { + return defaultInstance; + } + + public ExecProcedureResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ExecProcedureResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + expectedTimeout_ = input.readInt64(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + returnData_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; + } + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.Builder.class); } - public static com.google.protobuf.Parser<IsMasterRunningResponse> PARSER = - new com.google.protobuf.AbstractParser<IsMasterRunningResponse>() { - public IsMasterRunningResponse parsePartialFrom( + public static com.google.protobuf.Parser<ExecProcedureResponse> PARSER = + new com.google.protobuf.AbstractParser<ExecProcedureResponse>() { + public ExecProcedureResponse parsePartialFrom(
com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsMasterRunningResponse(input, extensionRegistry); + return new ExecProcedureResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required bool is_master_running = 1; - public static final int IS_MASTER_RUNNING_FIELD_NUMBER = 1; - private boolean isMasterRunning_; + // optional int64 expected_timeout = 1; + public static final int EXPECTED_TIMEOUT_FIELD_NUMBER = 1; + private long expectedTimeout_; /** - * required bool is_master_running = 1; + * optional int64 expected_timeout = 1; */ - public boolean hasIsMasterRunning() { + public boolean hasExpectedTimeout() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool is_master_running = 1; + * optional int64 expected_timeout = 1; */ - public boolean getIsMasterRunning() { - return isMasterRunning_; + public long getExpectedTimeout() { + return expectedTimeout_; } - private void initFields() { - isMasterRunning_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; + // optional bytes return_data = 2; + public static final int RETURN_DATA_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString returnData_; + /** + * optional bytes return_data = 2; + */ + public boolean hasReturnData() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bytes return_data = 2; + */ + public com.google.protobuf.ByteString getReturnData() { + return returnData_; + } + + private void initFields() { + expectedTimeout_ = 0L; + returnData_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; - if (!hasIsMasterRunning()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -52228,7 +52137,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, isMasterRunning_); + output.writeInt64(1, expectedTimeout_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, returnData_); } getUnknownFields().writeTo(output); } @@ -52241,7 +52153,11 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, isMasterRunning_); + .computeInt64Size(1, expectedTimeout_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, returnData_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -52260,16 +52176,21 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse)) { return super.equals(obj); } - 
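/*
 * A reader of this hunk may be misled by the '-' side: the
 * IsMasterRunningResponse lines are simply what the old generated file
 * contained at these offsets, and pairing them against ExecProcedureResponse
 * appears to be an alignment artifact of regenerating MasterProtos.java
 * wholesale, not a removal of that message. On the '+' side, field 1 is a
 * varint int64 and field 2 length-delimited bytes. A minimal round-trip
 * sketch using only the generated API visible in these hunks (the 5000L
 * timeout is an arbitrary illustrative value):
 *
 *   ExecProcedureResponse out = ExecProcedureResponse.newBuilder()
 *       .setExpectedTimeout(5000L)
 *       .setReturnData(com.google.protobuf.ByteString.copyFromUtf8("ok"))
 *       .build();
 *   ExecProcedureResponse in = ExecProcedureResponse.parseFrom(out.toByteArray());
 *   assert in.hasExpectedTimeout() && in.getExpectedTimeout() == 5000L;
 */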
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) obj; boolean result = true; - result = result && (hasIsMasterRunning() == other.hasIsMasterRunning()); - if (hasIsMasterRunning()) { - result = result && (getIsMasterRunning() - == other.getIsMasterRunning()); + result = result && (hasExpectedTimeout() == other.hasExpectedTimeout()); + if (hasExpectedTimeout()) { + result = result && (getExpectedTimeout() + == other.getExpectedTimeout()); + } + result = result && (hasReturnData() == other.hasReturnData()); + if (hasReturnData()) { + result = result && getReturnData() + .equals(other.getReturnData()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -52284,62 +52205,66 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasIsMasterRunning()) { - hash = (37 * hash) + IS_MASTER_RUNNING_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getIsMasterRunning()); + if (hasExpectedTimeout()) { + hash = (37 * hash) + EXPECTED_TIMEOUT_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getExpectedTimeout()); + } + if (hasReturnData()) { + hash = (37 * hash) + RETURN_DATA_FIELD_NUMBER; + hash = (53 * hash) + getReturnData().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -52348,7 +52273,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRu public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -52360,24 +52285,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsMasterRunningResponse} + * Protobuf type {@code hbase.pb.ExecProcedureResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -52397,8 +52322,10 @@ private static Builder create() { public Builder clear() { super.clear(); - isMasterRunning_ = false; + expectedTimeout_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); + returnData_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -52408,57 +52335,60 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsMasterRunningResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.isMasterRunning_ = isMasterRunning_; + result.expectedTimeout_ = expectedTimeout_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.returnData_ = returnData_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) { - return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()) return this; - if (other.hasIsMasterRunning()) { - setIsMasterRunning(other.getIsMasterRunning()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()) return this; + if (other.hasExpectedTimeout()) { + setExpectedTimeout(other.getExpectedTimeout()); + } + if (other.hasReturnData()) { + setReturnData(other.getReturnData()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasIsMasterRunning()) { - - return false; - } return true; } @@ -52466,11 +52396,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -52481,86 +52411,122 @@ public Builder mergeFrom( } private int bitField0_; - // required bool is_master_running = 1; - private boolean isMasterRunning_ ; + // optional int64 expected_timeout = 1; + private long expectedTimeout_ ; /** - * required bool is_master_running = 1; + * optional int64 expected_timeout = 1; */ - public boolean hasIsMasterRunning() { + public boolean hasExpectedTimeout() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool is_master_running = 1; + * optional int64 expected_timeout = 1; */ - public boolean getIsMasterRunning() { - return isMasterRunning_; + public long getExpectedTimeout() { + return expectedTimeout_; } /** - * required bool is_master_running = 1; + * optional int64 expected_timeout = 1; */ - public Builder setIsMasterRunning(boolean value) { + public Builder setExpectedTimeout(long value) { bitField0_ |= 0x00000001; - isMasterRunning_ = value; + expectedTimeout_ = value; onChanged(); return this; } /** - * required bool is_master_running = 1; + * optional int64 expected_timeout = 1; */ - public Builder clearIsMasterRunning() { + public Builder clearExpectedTimeout() { bitField0_ = (bitField0_ & ~0x00000001); - isMasterRunning_ = false; + expectedTimeout_ = 0L; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.IsMasterRunningResponse) + // optional bytes return_data = 2; + private com.google.protobuf.ByteString returnData_ = 
com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes return_data = 2; + */ + public boolean hasReturnData() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bytes return_data = 2; + */ + public com.google.protobuf.ByteString getReturnData() { + return returnData_; + } + /** + * optional bytes return_data = 2; + */ + public Builder setReturnData(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + returnData_ = value; + onChanged(); + return this; + } + /** + * optional bytes return_data = 2; + */ + public Builder clearReturnData() { + bitField0_ = (bitField0_ & ~0x00000002); + returnData_ = getDefaultInstance().getReturnData(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ExecProcedureResponse) } static { - defaultInstance = new IsMasterRunningResponse(true); + defaultInstance = new ExecProcedureResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.IsMasterRunningResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.ExecProcedureResponse) } - public interface ExecProcedureRequestOrBuilder + public interface IsProcedureDoneRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.ProcedureDescription procedure = 1; + // optional .hbase.pb.ProcedureDescription procedure = 1; /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ boolean hasProcedure(); /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure(); /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder(); } /** - * Protobuf type {@code hbase.pb.ExecProcedureRequest} + * Protobuf type {@code hbase.pb.IsProcedureDoneRequest} */ - public static final class ExecProcedureRequest extends + public static final class IsProcedureDoneRequest extends com.google.protobuf.GeneratedMessage - implements ExecProcedureRequestOrBuilder { - // Use ExecProcedureRequest.newBuilder() to construct. - private ExecProcedureRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsProcedureDoneRequestOrBuilder { + // Use IsProcedureDoneRequest.newBuilder() to construct. 
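/*
 * IsProcedureDoneRequest, beginning here, declares its ProcedureDescription
 * as optional (the '-' lines carry the old ExecProcedureRequest boilerplate,
 * where the field is required). Practically, an empty request now passes
 * isInitialized(), so build() succeeds without a procedure. A minimal
 * sketch, assuming the generated protobuf 2.5 builder API shown in this
 * file:
 *
 *   IsProcedureDoneRequest req = IsProcedureDoneRequest.newBuilder().build();
 *   assert !req.hasProcedure();   // legal: field 1 is optional
 */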
+ private IsProcedureDoneRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ExecProcedureRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsProcedureDoneRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ExecProcedureRequest defaultInstance; - public static ExecProcedureRequest getDefaultInstance() { + private static final IsProcedureDoneRequest defaultInstance; + public static IsProcedureDoneRequest getDefaultInstance() { return defaultInstance; } - public ExecProcedureRequest getDefaultInstanceForType() { + public IsProcedureDoneRequest getDefaultInstanceForType() { return defaultInstance; } @@ -52570,7 +52536,7 @@ public ExecProcedureRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ExecProcedureRequest( + private IsProcedureDoneRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -52620,49 +52586,49 @@ private ExecProcedureRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ExecProcedureRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsProcedureDoneRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ExecProcedureRequest(input, extensionRegistry); + return new IsProcedureDoneRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required .hbase.pb.ProcedureDescription procedure = 1; + // optional .hbase.pb.ProcedureDescription procedure = 1; public static final int PROCEDURE_FIELD_NUMBER = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_; /** - * required .hbase.pb.ProcedureDescription procedure = 1; 
+ * optional .hbase.pb.ProcedureDescription procedure = 1; */ public boolean hasProcedure() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { return procedure_; } /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { return procedure_; @@ -52676,13 +52642,11 @@ public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasProcedure()) { - memoizedIsInitialized = 0; - return false; - } - if (!getProcedure().isInitialized()) { - memoizedIsInitialized = 0; - return false; + if (hasProcedure()) { + if (!getProcedure().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -52724,10 +52688,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) obj; boolean result = true; result = result && (hasProcedure() == other.hasProcedure()); @@ -52757,53 +52721,53 @@ public int hashCode() { return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest 
parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -52812,7 +52776,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProced public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -52824,24 +52788,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ExecProcedureRequest} + * Protobuf type {@code hbase.pb.IsProcedureDoneRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -52877,23 +52841,23 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -52910,16 +52874,16 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequ } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)other); } else { 
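/*
 * Contrast with ExecProcedureRequest, whose procedure field stays required
 * on the '+' side of this patch (see its builder earlier in the file):
 * building it empty should throw under standard protobuf required-field
 * semantics, while the relaxed isInitialized() just below only validates
 * procedure when it is present. A hedged sketch of the contrast:
 *
 *   try {
 *     ExecProcedureRequest.newBuilder().build();   // no procedure set
 *   } catch (com.google.protobuf.UninitializedMessageException expected) {
 *     // required field 'procedure' missing
 *   }
 */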
super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance()) return this; if (other.hasProcedure()) { mergeProcedure(other.getProcedure()); } @@ -52928,13 +52892,11 @@ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos } public final boolean isInitialized() { - if (!hasProcedure()) { - - return false; - } - if (!getProcedure().isInitialized()) { - - return false; + if (hasProcedure()) { + if (!getProcedure().isInitialized()) { + + return false; + } } return true; } @@ -52943,11 +52905,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -52958,18 +52920,18 @@ public Builder mergeFrom( } private int bitField0_; - // required .hbase.pb.ProcedureDescription procedure = 1; + // optional .hbase.pb.ProcedureDescription procedure = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> procedureBuilder_; /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ public boolean hasProcedure() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { if (procedureBuilder_ == null) { @@ -52979,7 +52941,7 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti } } /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ public Builder setProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { if (procedureBuilder_ == null) { @@ -52995,7 +52957,7 @@ public Builder setProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProt return this; } /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional 
.hbase.pb.ProcedureDescription procedure = 1; */ public Builder setProcedure( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) { @@ -53009,7 +52971,7 @@ public Builder setProcedure( return this; } /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ public Builder mergeProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { if (procedureBuilder_ == null) { @@ -53028,7 +52990,7 @@ public Builder mergeProcedure(org.apache.hadoop.hbase.protobuf.generated.HBasePr return this; } /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ public Builder clearProcedure() { if (procedureBuilder_ == null) { @@ -53041,7 +53003,7 @@ public Builder clearProcedure() { return this; } /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getProcedureBuilder() { bitField0_ |= 0x00000001; @@ -53049,7 +53011,7 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti return getProcedureFieldBuilder().getBuilder(); } /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { if (procedureBuilder_ != null) { @@ -53059,7 +53021,7 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti } } /** - * required .hbase.pb.ProcedureDescription procedure = 1; + * optional .hbase.pb.ProcedureDescription procedure = 1; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> @@ -53075,59 +53037,63 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti return procedureBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.ExecProcedureRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsProcedureDoneRequest) } static { - defaultInstance = new ExecProcedureRequest(true); + defaultInstance = new IsProcedureDoneRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.ExecProcedureRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.IsProcedureDoneRequest) } - public interface ExecProcedureResponseOrBuilder + public interface IsProcedureDoneResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional int64 expected_timeout = 1; + // optional bool done = 1 [default = false]; /** - * optional int64 expected_timeout = 1; + * optional bool done = 1 [default = false]; */ - boolean hasExpectedTimeout(); + boolean hasDone(); /** - * optional int64 expected_timeout = 1; + * optional bool done = 1 [default = false]; */ - long getExpectedTimeout(); + boolean getDone(); - // optional bytes return_data = 2; + // optional .hbase.pb.ProcedureDescription snapshot = 2; /** - * optional bytes return_data = 2; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - boolean hasReturnData(); + boolean hasSnapshot(); /** - * optional bytes return_data = 2; + * optional 
.hbase.pb.ProcedureDescription snapshot = 2; */ - com.google.protobuf.ByteString getReturnData(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot(); + /** + * optional .hbase.pb.ProcedureDescription snapshot = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder(); } /** - * Protobuf type {@code hbase.pb.ExecProcedureResponse} + * Protobuf type {@code hbase.pb.IsProcedureDoneResponse} */ - public static final class ExecProcedureResponse extends + public static final class IsProcedureDoneResponse extends com.google.protobuf.GeneratedMessage - implements ExecProcedureResponseOrBuilder { - // Use ExecProcedureResponse.newBuilder() to construct. - private ExecProcedureResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsProcedureDoneResponseOrBuilder { + // Use IsProcedureDoneResponse.newBuilder() to construct. + private IsProcedureDoneResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ExecProcedureResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsProcedureDoneResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ExecProcedureResponse defaultInstance; - public static ExecProcedureResponse getDefaultInstance() { + private static final IsProcedureDoneResponse defaultInstance; + public static IsProcedureDoneResponse getDefaultInstance() { return defaultInstance; } - public ExecProcedureResponse getDefaultInstanceForType() { + public IsProcedureDoneResponse getDefaultInstanceForType() { return defaultInstance; } @@ -53137,7 +53103,7 @@ public ExecProcedureResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ExecProcedureResponse( + private IsProcedureDoneResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -53162,12 +53128,20 @@ private ExecProcedureResponse( } case 8: { bitField0_ |= 0x00000001; - expectedTimeout_ = input.readInt64(); + done_ = input.readBool(); break; } case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = snapshot_.toBuilder(); + } + snapshot_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(snapshot_); + snapshot_ = subBuilder.buildPartial(); + } bitField0_ |= 0x00000002; - returnData_ = input.readBytes(); break; } } @@ -53184,73 +53158,85 @@ private ExecProcedureResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ExecProcedureResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsProcedureDoneResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ExecProcedureResponse(input, extensionRegistry); + return new IsProcedureDoneResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // optional int64 expected_timeout = 1; - public static final int EXPECTED_TIMEOUT_FIELD_NUMBER = 1; - private long expectedTimeout_; + // optional bool done = 1 [default = false]; + public static final int DONE_FIELD_NUMBER = 1; + private boolean done_; /** - * optional int64 expected_timeout = 1; + * optional bool done = 1 [default = false]; */ - public boolean hasExpectedTimeout() { + public boolean hasDone() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional int64 expected_timeout = 1; + * optional bool done = 1 [default = false]; */ - public long getExpectedTimeout() { - return expectedTimeout_; + public boolean getDone() { + return done_; } - // optional bytes return_data = 2; - public static final int RETURN_DATA_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString returnData_; + // optional .hbase.pb.ProcedureDescription snapshot = 2; + public static final int SNAPSHOT_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_; /** - * optional bytes return_data = 2; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public boolean hasReturnData() { + public boolean hasSnapshot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional bytes return_data = 2; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public com.google.protobuf.ByteString getReturnData() { - return returnData_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() { + return snapshot_; + } + /** + * optional .hbase.pb.ProcedureDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() { + return snapshot_; } private void initFields() { - expectedTimeout_ = 0L; - returnData_ = com.google.protobuf.ByteString.EMPTY; + done_ = false; + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (hasSnapshot()) { + if (!getSnapshot().isInitialized()) { + 
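/*
 * IsProcedureDoneResponse pairs a presence-defaulted flag with an optional
 * nested message: getDone() returns false when the field is unset, and
 * snapshot, when set, must itself be initialized (ProcedureDescription
 * evidently carries required fields of its own, hence the nested check in
 * this isInitialized()). A minimal sketch:
 *
 *   IsProcedureDoneResponse resp = IsProcedureDoneResponse.newBuilder()
 *       .setDone(true)
 *       .build();                    // snapshot may be omitted entirely
 *   assert resp.getDone();
 *   assert !resp.hasSnapshot();
 */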
memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -53259,10 +53245,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, expectedTimeout_); + output.writeBool(1, done_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, returnData_); + output.writeMessage(2, snapshot_); } getUnknownFields().writeTo(output); } @@ -53275,11 +53261,11 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, expectedTimeout_); + .computeBoolSize(1, done_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, returnData_); + .computeMessageSize(2, snapshot_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -53298,21 +53284,21 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) obj; boolean result = true; - result = result && (hasExpectedTimeout() == other.hasExpectedTimeout()); - if (hasExpectedTimeout()) { - result = result && (getExpectedTimeout() - == other.getExpectedTimeout()); + result = result && (hasDone() == other.hasDone()); + if (hasDone()) { + result = result && (getDone() + == other.getDone()); } - result = result && (hasReturnData() == other.hasReturnData()); - if (hasReturnData()) { - result = result && getReturnData() - .equals(other.getReturnData()); + result = result && (hasSnapshot() == other.hasSnapshot()); + if (hasSnapshot()) { + result = result && getSnapshot() + .equals(other.getSnapshot()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -53327,66 +53313,66 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasExpectedTimeout()) { - hash = (37 * hash) + EXPECTED_TIMEOUT_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getExpectedTimeout()); + if (hasDone()) { + hash = (37 * hash) + DONE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getDone()); } - if (hasReturnData()) { - hash = (37 * hash) + RETURN_DATA_FIELD_NUMBER; - hash = (53 * hash) + getReturnData().hashCode(); + if (hasSnapshot()) { + hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; + hash = (53 * hash) + getSnapshot().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -53395,7 +53381,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProced public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -53407,24 +53393,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ExecProcedureResponse} + * Protobuf type {@code hbase.pb.IsProcedureDoneResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -53436,6 +53422,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSnapshotFieldBuilder(); } } private static Builder create() { @@ -53444,9 +53431,13 @@ private static Builder create() { public Builder clear() { super.clear(); - expectedTimeout_ = 0L; + done_ = false; bitField0_ = (bitField0_ & ~0x00000001); - returnData_ = com.google.protobuf.ByteString.EMPTY; + if (snapshotBuilder_ == null) { + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + } else { + snapshotBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -53457,60 +53448,70 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ExecProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.expectedTimeout_ = expectedTimeout_; + result.done_ = done_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.returnData_ = returnData_; + if (snapshotBuilder_ == null) { + result.snapshot_ = snapshot_; + } else { + result.snapshot_ = snapshotBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()) return this; - if (other.hasExpectedTimeout()) { - setExpectedTimeout(other.getExpectedTimeout()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()) return this; + if (other.hasDone()) { + setDone(other.getDone()); } - if (other.hasReturnData()) { - setReturnData(other.getReturnData()); + if (other.hasSnapshot()) { + mergeSnapshot(other.getSnapshot()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (hasSnapshot()) { + if (!getSnapshot().isInitialized()) { + + return false; + } + } return true; } @@ -53518,11 +53519,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -53533,122 +53534,199 @@ public Builder mergeFrom( } private int bitField0_; - // optional int64 expected_timeout = 1; - private long expectedTimeout_ ; + // optional bool done = 1 [default = false]; + private boolean done_ ; /** - * optional int64 expected_timeout = 1; + * optional bool done = 1 [default = false]; */ - public boolean hasExpectedTimeout() { + public boolean hasDone() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional int64 expected_timeout = 1; + * optional bool done = 1 [default = false]; */ - public long getExpectedTimeout() { - return expectedTimeout_; + public boolean getDone() { + return done_; } /** - * optional int64 expected_timeout = 1; + * optional bool done = 1 [default = false]; */ - public Builder setExpectedTimeout(long value) { + public Builder setDone(boolean value) { bitField0_ |= 0x00000001; - expectedTimeout_ = value; + done_ = value; onChanged(); return this; } /** - * optional int64 expected_timeout = 1; + * optional bool done = 1 [default = false]; */ - public Builder clearExpectedTimeout() { + public Builder clearDone() { bitField0_ = (bitField0_ & ~0x00000001); - expectedTimeout_ = 0L; + done_ = false; onChanged(); return this; } - // optional bytes return_data = 2; - private com.google.protobuf.ByteString returnData_ = com.google.protobuf.ByteString.EMPTY; + // optional .hbase.pb.ProcedureDescription snapshot = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> snapshotBuilder_; /** - * optional bytes return_data = 2; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public boolean hasReturnData() { + public boolean hasSnapshot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional bytes return_data = 2; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public com.google.protobuf.ByteString getReturnData() { - return returnData_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() { + if (snapshotBuilder_ == null) { + return snapshot_; + } else { + return snapshotBuilder_.getMessage(); + } } /** - * optional bytes return_data = 2; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public Builder setReturnData(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - returnData_ = value; - onChanged(); + public Builder 
setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { + if (snapshotBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + snapshot_ = value; + onChanged(); + } else { + snapshotBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; return this; } /** - * optional bytes return_data = 2; + * optional .hbase.pb.ProcedureDescription snapshot = 2; */ - public Builder clearReturnData() { + public Builder setSnapshot( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) { + if (snapshotBuilder_ == null) { + snapshot_ = builderForValue.build(); + onChanged(); + } else { + snapshotBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .hbase.pb.ProcedureDescription snapshot = 2; + */ + public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { + if (snapshotBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) { + snapshot_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial(); + } else { + snapshot_ = value; + } + onChanged(); + } else { + snapshotBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .hbase.pb.ProcedureDescription snapshot = 2; + */ + public Builder clearSnapshot() { + if (snapshotBuilder_ == null) { + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + onChanged(); + } else { + snapshotBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000002); - returnData_ = getDefaultInstance().getReturnData(); - onChanged(); return this; } - - // @@protoc_insertion_point(builder_scope:hbase.pb.ExecProcedureResponse) - } - - static { - defaultInstance = new ExecProcedureResponse(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.ExecProcedureResponse) - } - - public interface IsProcedureDoneRequestOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional .hbase.pb.ProcedureDescription procedure = 1; - /** - * optional .hbase.pb.ProcedureDescription procedure = 1; - */ - boolean hasProcedure(); + /** + * optional .hbase.pb.ProcedureDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getSnapshotBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getSnapshotFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.ProcedureDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() { + if (snapshotBuilder_ != null) { + return snapshotBuilder_.getMessageOrBuilder(); + } else { + return snapshot_; + } + } + /** + * optional .hbase.pb.ProcedureDescription snapshot = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> + getSnapshotFieldBuilder() { + if (snapshotBuilder_ == null) { + snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>( + snapshot_, + getParentForChildren(), + isClean()); + snapshot_ = null; + } + return snapshotBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.IsProcedureDoneResponse) + } + + static { + defaultInstance = new IsProcedureDoneResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.IsProcedureDoneResponse) + } + + public interface GetProcedureResultRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 proc_id = 1; /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required uint64 proc_id = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure(); + boolean hasProcId(); /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required uint64 proc_id = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder(); + long getProcId(); } /** - * Protobuf type {@code hbase.pb.IsProcedureDoneRequest} + * Protobuf type {@code hbase.pb.GetProcedureResultRequest} */ - public static final class IsProcedureDoneRequest extends + public static final class GetProcedureResultRequest extends com.google.protobuf.GeneratedMessage - implements IsProcedureDoneRequestOrBuilder { - // Use IsProcedureDoneRequest.newBuilder() to construct. - private IsProcedureDoneRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetProcedureResultRequestOrBuilder { + // Use GetProcedureResultRequest.newBuilder() to construct. 
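// [Editor's illustrative sketch -- not part of the generated diff or the patch.]
// The hunks above restore hbase.pb.IsProcedureDoneResponse to its pre-HBASE-7767
// wire shape: an optional bool `done` plus an optional ProcedureDescription
// `snapshot` submessage. A minimal consumer of that wire format, assuming a
// hypothetical enclosing helper class (only the generated names below appear in
// the diff itself), might look like:
static boolean procedureDone(byte[] wire)
    throws com.google.protobuf.InvalidProtocolBufferException {
  org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse resp =
      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse
          .parseFrom(wire);
  // Both fields are optional on the wire, so check presence before reading.
  return resp.hasDone() && resp.getDone();
}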
+ private GetProcedureResultRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private IsProcedureDoneRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetProcedureResultRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final IsProcedureDoneRequest defaultInstance; - public static IsProcedureDoneRequest getDefaultInstance() { + private static final GetProcedureResultRequest defaultInstance; + public static GetProcedureResultRequest getDefaultInstance() { return defaultInstance; } - public IsProcedureDoneRequest getDefaultInstanceForType() { + public GetProcedureResultRequest getDefaultInstanceForType() { return defaultInstance; } @@ -53658,7 +53736,7 @@ public IsProcedureDoneRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private IsProcedureDoneRequest( + private GetProcedureResultRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -53681,17 +53759,9 @@ private IsProcedureDoneRequest( } break; } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = procedure_.toBuilder(); - } - procedure_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(procedure_); - procedure_ = subBuilder.buildPartial(); - } + case 8: { bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); break; } } @@ -53708,67 +53778,59 @@ private IsProcedureDoneRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public IsProcedureDoneRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetProcedureResultRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsProcedureDoneRequest(input, extensionRegistry); + return new 
GetProcedureResultRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // optional .hbase.pb.ProcedureDescription procedure = 1; - public static final int PROCEDURE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_; + // required uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required uint64 proc_id = 1; */ - public boolean hasProcedure() { + public boolean hasProcId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { - return procedure_; - } - /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required uint64 proc_id = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { - return procedure_; + public long getProcId() { + return procId_; } private void initFields() { - procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + procId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (hasProcedure()) { - if (!getProcedure().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasProcId()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -53778,7 +53840,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, procedure_); + output.writeUInt64(1, procId_); } getUnknownFields().writeTo(output); } @@ -53791,7 +53853,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, procedure_); + .computeUInt64Size(1, procId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -53810,16 +53872,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) obj; boolean result = true; - result = result && (hasProcedure() == other.hasProcedure()); - if (hasProcedure()) { - result = result && getProcedure() - .equals(other.getProcedure()); + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -53834,62 +53896,62 
@@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasProcedure()) { - hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; - hash = (53 * hash) + getProcedure().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -53898,7 +53960,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedur public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -53910,24 +53972,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsProcedureDoneRequest} + * Protobuf type {@code hbase.pb.GetProcedureResultRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -53939,7 +54001,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getProcedureFieldBuilder(); } } private static Builder create() { @@ -53948,11 +54009,7 @@ private static Builder create() { public Builder clear() { super.clear(); - if (procedureBuilder_ == null) { - procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - } else { - procedureBuilder_.clear(); - } + 
procId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -53963,62 +54020,56 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (procedureBuilder_ == null) { - result.procedure_ = procedure_; - } else { - result.procedure_ = procedureBuilder_.build(); - } + result.procId_ = procId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance()) return this; - if (other.hasProcedure()) { - mergeProcedure(other.getProcedure()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); } 
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (hasProcedure()) { - if (!getProcedure().isInitialized()) { - - return false; - } + if (!hasProcId()) { + + return false; } return true; } @@ -54027,11 +54078,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -54042,180 +54093,126 @@ public Builder mergeFrom( } private int bitField0_; - // optional .hbase.pb.ProcedureDescription procedure = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> procedureBuilder_; + // required uint64 proc_id = 1; + private long procId_ ; /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required uint64 proc_id = 1; */ - public boolean hasProcedure() { + public boolean hasProcId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getProcedure() { - if (procedureBuilder_ == null) { - return procedure_; - } else { - return procedureBuilder_.getMessage(); - } - } - /** - * optional .hbase.pb.ProcedureDescription procedure = 1; - */ - public Builder setProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { - if (procedureBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - procedure_ = value; - onChanged(); - } else { - procedureBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required uint64 proc_id = 1; */ - public Builder setProcedure( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) { - if (procedureBuilder_ == null) { - procedure_ = builderForValue.build(); - onChanged(); - } else { - procedureBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; + public long getProcId() { + return procId_; } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required uint64 proc_id = 1; */ - public Builder mergeProcedure(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { - if (procedureBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - procedure_ != 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) { - procedure_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder(procedure_).mergeFrom(value).buildPartial(); - } else { - procedure_ = value; - } - onChanged(); - } else { - procedureBuilder_.mergeFrom(value); - } + public Builder setProcId(long value) { bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); return this; } /** - * optional .hbase.pb.ProcedureDescription procedure = 1; + * required uint64 proc_id = 1; */ - public Builder clearProcedure() { - if (procedureBuilder_ == null) { - procedure_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - onChanged(); - } else { - procedureBuilder_.clear(); - } + public Builder clearProcId() { bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * optional .hbase.pb.ProcedureDescription procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getProcedureBuilder() { - bitField0_ |= 0x00000001; + procId_ = 0L; onChanged(); - return getProcedureFieldBuilder().getBuilder(); - } - /** - * optional .hbase.pb.ProcedureDescription procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getProcedureOrBuilder() { - if (procedureBuilder_ != null) { - return procedureBuilder_.getMessageOrBuilder(); - } else { - return procedure_; - } - } - /** - * optional .hbase.pb.ProcedureDescription procedure = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> - getProcedureFieldBuilder() { - if (procedureBuilder_ == null) { - procedureBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>( - procedure_, - getParentForChildren(), - isClean()); - procedure_ = null; - } - return procedureBuilder_; + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.IsProcedureDoneRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetProcedureResultRequest) } static { - defaultInstance = new IsProcedureDoneRequest(true); + defaultInstance = new GetProcedureResultRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.IsProcedureDoneRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.GetProcedureResultRequest) } - public interface IsProcedureDoneResponseOrBuilder + public interface GetProcedureResultResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional bool done = 1 [default = false]; + // required .hbase.pb.GetProcedureResultResponse.State state = 1; /** - * optional bool done = 1 [default = false]; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - boolean hasDone(); + boolean hasState(); /** - * optional bool done = 1 [default = false]; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - boolean getDone(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState(); - // optional 
.hbase.pb.ProcedureDescription snapshot = 2; + // optional uint64 start_time = 2; /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 start_time = 2; */ - boolean hasSnapshot(); + boolean hasStartTime(); /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 start_time = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot(); + long getStartTime(); + + // optional uint64 last_update = 3; /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 last_update = 3; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder(); + boolean hasLastUpdate(); + /** + * optional uint64 last_update = 3; + */ + long getLastUpdate(); + + // optional bytes result = 4; + /** + * optional bytes result = 4; + */ + boolean hasResult(); + /** + * optional bytes result = 4; + */ + com.google.protobuf.ByteString getResult(); + + // optional .hbase.pb.ForeignExceptionMessage exception = 5; + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + boolean hasException(); + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException(); + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder(); } /** - * Protobuf type {@code hbase.pb.IsProcedureDoneResponse} + * Protobuf type {@code hbase.pb.GetProcedureResultResponse} */ - public static final class IsProcedureDoneResponse extends + public static final class GetProcedureResultResponse extends com.google.protobuf.GeneratedMessage - implements IsProcedureDoneResponseOrBuilder { - // Use IsProcedureDoneResponse.newBuilder() to construct. - private IsProcedureDoneResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetProcedureResultResponseOrBuilder { + // Use GetProcedureResultResponse.newBuilder() to construct. 
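// [Editor's illustrative sketch -- not part of the generated diff or the patch.]
// GetProcedureResultRequest, restored above, carries only the required uint64
// proc_id. Assuming the same hypothetical helper class, building one is:
static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest
    resultRequestFor(long procId) {
  // proc_id is required; build() throws UninitializedMessageException if unset.
  return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest
      .newBuilder()
      .setProcId(procId)
      .build();
}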
+ private GetProcedureResultResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private IsProcedureDoneResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetProcedureResultResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final IsProcedureDoneResponse defaultInstance; - public static IsProcedureDoneResponse getDefaultInstance() { + private static final GetProcedureResultResponse defaultInstance; + public static GetProcedureResultResponse getDefaultInstance() { return defaultInstance; } - public IsProcedureDoneResponse getDefaultInstanceForType() { + public GetProcedureResultResponse getDefaultInstanceForType() { return defaultInstance; } @@ -54225,7 +54222,7 @@ public IsProcedureDoneResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private IsProcedureDoneResponse( + private GetProcedureResultResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -54249,21 +54246,42 @@ private IsProcedureDoneResponse( break; } case 8: { - bitField0_ |= 0x00000001; - done_ = input.readBool(); + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } break; } - case 18: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = snapshot_.toBuilder(); + case 16: { + bitField0_ |= 0x00000002; + startTime_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + lastUpdate_ = input.readUInt64(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + result_ = input.readBytes(); + break; + } + case 42: { + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = exception_.toBuilder(); } - snapshot_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.PARSER, extensionRegistry); + exception_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.PARSER, extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom(snapshot_); - snapshot_ = subBuilder.buildPartial(); + subBuilder.mergeFrom(exception_); + exception_ = subBuilder.buildPartial(); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000010; break; } } @@ -54280,84 +54298,224 @@ private IsProcedureDoneResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public IsProcedureDoneResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetProcedureResultResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsProcedureDoneResponse(input, extensionRegistry); + return new GetProcedureResultResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // optional bool done = 1 [default = false]; - public static final int DONE_FIELD_NUMBER = 1; - private boolean done_; /** - * optional bool done = 1 [default = false]; + * Protobuf enum {@code hbase.pb.GetProcedureResultResponse.State} */ - public boolean hasDone() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional bool done = 1 [default = false]; + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + * NOT_FOUND = 0; + */ + NOT_FOUND(0, 0), + /** + * RUNNING = 1; + */ + RUNNING(1, 1), + /** + * FINISHED = 2; + */ + FINISHED(2, 2), + ; + + /** + * NOT_FOUND = 0; + */ + public static final int NOT_FOUND_VALUE = 0; + /** + * RUNNING = 1; + */ + public static final int RUNNING_VALUE = 1; + /** + * FINISHED = 2; + */ + public static final int FINISHED_VALUE = 2; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return NOT_FOUND; + case 1: return RUNNING; + case 2: return FINISHED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new 
java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.GetProcedureResultResponse.State) + } + + private int bitField0_; + // required .hbase.pb.GetProcedureResultResponse.State state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_; + /** + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public boolean getDone() { - return done_; + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.GetProcedureResultResponse.State state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { + return state_; } - // optional .hbase.pb.ProcedureDescription snapshot = 2; - public static final int SNAPSHOT_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_; + // optional uint64 start_time = 2; + public static final int START_TIME_FIELD_NUMBER = 2; + private long startTime_; /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 start_time = 2; */ - public boolean hasSnapshot() { + public boolean hasStartTime() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 start_time = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() { - return snapshot_; + public long getStartTime() { + return startTime_; } + + // optional uint64 last_update = 3; + public static final int LAST_UPDATE_FIELD_NUMBER = 3; + private long lastUpdate_; /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 last_update = 3; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() { - return snapshot_; + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 last_update = 3; + */ + public long getLastUpdate() { + return lastUpdate_; + } + + // optional bytes result = 4; + public static final int RESULT_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString result_; + /** + * optional bytes result = 4; + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes result = 4; + */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + + // optional .hbase.pb.ForeignExceptionMessage exception = 5; + public static final int EXCEPTION_FIELD_NUMBER = 5; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_; + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public boolean hasException() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + return exception_; + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + return exception_; } private void initFields() { - done_ = false; - snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + startTime_ = 0L; + lastUpdate_ = 0L; + result_ = com.google.protobuf.ByteString.EMPTY; + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (hasSnapshot()) { - if (!getSnapshot().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasState()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -54367,10 +54525,19 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, done_); + output.writeEnum(1, state_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, snapshot_); + output.writeUInt64(2, startTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, lastUpdate_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, result_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, exception_); } getUnknownFields().writeTo(output); } @@ -54383,11 +54550,23 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, done_); + .computeEnumSize(1, state_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, snapshot_); + .computeUInt64Size(2, startTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, lastUpdate_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, result_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, exception_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -54406,21 +54585,36 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) obj; boolean result = true; - result = result && (hasDone() == other.hasDone()); - if (hasDone()) { - result = result && (getDone() - == other.getDone()); + result = result && (hasState() == 
other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); } - result = result && (hasSnapshot() == other.hasSnapshot()); - if (hasSnapshot()) { - result = result && getSnapshot() - .equals(other.getSnapshot()); + result = result && (hasStartTime() == other.hasStartTime()); + if (hasStartTime()) { + result = result && (getStartTime() + == other.getStartTime()); + } + result = result && (hasLastUpdate() == other.hasLastUpdate()); + if (hasLastUpdate()) { + result = result && (getLastUpdate() + == other.getLastUpdate()); + } + result = result && (hasResult() == other.hasResult()); + if (hasResult()) { + result = result && getResult() + .equals(other.getResult()); + } + result = result && (hasException() == other.hasException()); + if (hasException()) { + result = result && getException() + .equals(other.getException()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -54435,66 +54629,78 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasDone()) { - hash = (37 * hash) + DONE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getDone()); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); } - if (hasSnapshot()) { - hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; - hash = (53 * hash) + getSnapshot().hashCode(); + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTime()); + } + if (hasLastUpdate()) { + hash = (37 * hash) + LAST_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLastUpdate()); + } + if (hasResult()) { + hash = (37 * hash) + RESULT_FIELD_NUMBER; + hash = (53 * hash) + getResult().hashCode(); + } + if (hasException()) { + hash = (37 * hash) + EXCEPTION_FIELD_NUMBER; + hash = (53 * hash) + getException().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -54503,7 +54709,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedur public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -54515,24 +54721,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsProcedureDoneResponse} + * Protobuf type {@code hbase.pb.GetProcedureResultResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -54544,7 +54750,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getSnapshotFieldBuilder(); + getExceptionFieldBuilder(); } } private static Builder create() { @@ -54553,14 +54759,20 @@ private static Builder create() { public Builder clear() { super.clear(); - done_ = false; + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; bitField0_ = (bitField0_ & ~0x00000001); - if (snapshotBuilder_ == null) { - snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); + startTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + lastUpdate_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + result_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); } else { - snapshotBuilder_.clear(); + exceptionBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -54570,36 +54782,48 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsProcedureDoneResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = buildPartial(); + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.done_ = done_; + result.state_ = state_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - if (snapshotBuilder_ == null) { - result.snapshot_ = snapshot_; + result.startTime_ = startTime_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.lastUpdate_ = lastUpdate_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.result_ = result_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (exceptionBuilder_ == null) { + result.exception_ = exception_; } else { - result.snapshot_ = snapshotBuilder_.build(); + result.exception_ = exceptionBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); @@ -54607,32 +54831,39 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRe } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()) return this; - if (other.hasDone()) { - setDone(other.getDone()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); } - if (other.hasSnapshot()) { - mergeSnapshot(other.getSnapshot()); + if (other.hasStartTime()) { + setStartTime(other.getStartTime()); + } + if (other.hasLastUpdate()) { + setLastUpdate(other.getLastUpdate()); + } + if (other.hasResult()) { + setResult(other.getResult()); + } + if (other.hasException()) { + mergeException(other.getException()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if 
(hasSnapshot()) { - if (!getSnapshot().isInitialized()) { - - return false; - } + if (!hasState()) { + + return false; } return true; } @@ -54641,11 +54872,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -54656,168 +54887,273 @@ public Builder mergeFrom( } private int bitField0_; - // optional bool done = 1 [default = false]; - private boolean done_ ; + // required .hbase.pb.GetProcedureResultResponse.State state = 1; + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; /** - * optional bool done = 1 [default = false]; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public boolean hasDone() { + public boolean hasState() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional bool done = 1 [default = false]; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public boolean getDone() { - return done_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { + return state_; } /** - * optional bool done = 1 [default = false]; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public Builder setDone(boolean value) { + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value) { + if (value == null) { + throw new NullPointerException(); + } bitField0_ |= 0x00000001; - done_ = value; + state_ = value; onChanged(); return this; } /** - * optional bool done = 1 [default = false]; + * required .hbase.pb.GetProcedureResultResponse.State state = 1; */ - public Builder clearDone() { + public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000001); - done_ = false; + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; onChanged(); return this; } - // optional .hbase.pb.ProcedureDescription snapshot = 2; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> snapshotBuilder_; + // optional uint64 start_time = 2; + private long startTime_ ; /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 start_time = 2; */ - public boolean hasSnapshot() { + public boolean hasStartTime() { 
return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 start_time = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() { - if (snapshotBuilder_ == null) { - return snapshot_; - } else { - return snapshotBuilder_.getMessage(); - } + public long getStartTime() { + return startTime_; } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 start_time = 2; */ - public Builder setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { - if (snapshotBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - snapshot_ = value; - onChanged(); - } else { - snapshotBuilder_.setMessage(value); - } + public Builder setStartTime(long value) { bitField0_ |= 0x00000002; + startTime_ = value; + onChanged(); return this; } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 start_time = 2; */ - public Builder setSnapshot( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) { - if (snapshotBuilder_ == null) { - snapshot_ = builderForValue.build(); - onChanged(); - } else { - snapshotBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = 0L; + onChanged(); return this; } + + // optional uint64 last_update = 3; + private long lastUpdate_ ; /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 last_update = 3; */ - public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) { - if (snapshotBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) { - snapshot_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial(); - } else { - snapshot_ = value; - } - onChanged(); - } else { - snapshotBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 last_update = 3; */ - public Builder clearSnapshot() { - if (snapshotBuilder_ == null) { - snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance(); - onChanged(); - } else { - snapshotBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; + public long getLastUpdate() { + return lastUpdate_; } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 last_update = 3; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getSnapshotBuilder() { - bitField0_ |= 0x00000002; + public Builder setLastUpdate(long value) { + bitField0_ |= 0x00000004; + lastUpdate_ = value; onChanged(); - return getSnapshotFieldBuilder().getBuilder(); + return this; } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional uint64 last_update = 3; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() { - if (snapshotBuilder_ != null) { - return snapshotBuilder_.getMessageOrBuilder(); + public 
Builder clearLastUpdate() { + bitField0_ = (bitField0_ & ~0x00000004); + lastUpdate_ = 0L; + onChanged(); + return this; + } + + // optional bytes result = 4; + private com.google.protobuf.ByteString result_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes result = 4; + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes result = 4; + */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + /** + * optional bytes result = 4; + */ + public Builder setResult(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + result_ = value; + onChanged(); + return this; + } + /** + * optional bytes result = 4; + */ + public Builder clearResult() { + bitField0_ = (bitField0_ & ~0x00000008); + result_ = getDefaultInstance().getResult(); + onChanged(); + return this; + } + + // optional .hbase.pb.ForeignExceptionMessage exception = 5; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> exceptionBuilder_; + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public boolean hasException() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + if (exceptionBuilder_ == null) { + return exception_; } else { - return snapshot_; + return exceptionBuilder_.getMessage(); } } /** - * optional .hbase.pb.ProcedureDescription snapshot = 2; + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public Builder setException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + exception_ = value; + onChanged(); + } else { + exceptionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public Builder setException( + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder builderForValue) { + if (exceptionBuilder_ == null) { + exception_ = builderForValue.build(); + onChanged(); + } else { + exceptionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public Builder mergeException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + exception_ != org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance()) { + exception_ = + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.newBuilder(exception_).mergeFrom(value).buildPartial(); 
+ } else { + exception_ = value; + } + onChanged(); + } else { + exceptionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public Builder clearException() { + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + onChanged(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder getExceptionBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getExceptionFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + if (exceptionBuilder_ != null) { + return exceptionBuilder_.getMessageOrBuilder(); + } else { + return exception_; + } + } + /** + * optional .hbase.pb.ForeignExceptionMessage exception = 5; */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> - getSnapshotFieldBuilder() { - if (snapshotBuilder_ == null) { - snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>( - snapshot_, + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> + getExceptionFieldBuilder() { + if (exceptionBuilder_ == null) { + exceptionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>( + exception_, getParentForChildren(), isClean()); - snapshot_ = null; + exception_ = null; } - return snapshotBuilder_; + return exceptionBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.IsProcedureDoneResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetProcedureResultResponse) } static { - defaultInstance = new IsProcedureDoneResponse(true); + defaultInstance = new GetProcedureResultResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.IsProcedureDoneResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetProcedureResultResponse) } - public interface GetProcedureResultRequestOrBuilder + public interface AbortProcedureRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint64 proc_id = 1; @@ -54829,26 +55165,36 @@ public interface GetProcedureResultRequestOrBuilder * required uint64 proc_id = 1; */ long 
getProcId(); + + // optional bool mayInterruptIfRunning = 2 [default = true]; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + boolean hasMayInterruptIfRunning(); + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + boolean getMayInterruptIfRunning(); } /** - * Protobuf type {@code hbase.pb.GetProcedureResultRequest} + * Protobuf type {@code hbase.pb.AbortProcedureRequest} */ - public static final class GetProcedureResultRequest extends + public static final class AbortProcedureRequest extends com.google.protobuf.GeneratedMessage - implements GetProcedureResultRequestOrBuilder { - // Use GetProcedureResultRequest.newBuilder() to construct. - private GetProcedureResultRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements AbortProcedureRequestOrBuilder { + // Use AbortProcedureRequest.newBuilder() to construct. + private AbortProcedureRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetProcedureResultRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private AbortProcedureRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetProcedureResultRequest defaultInstance; - public static GetProcedureResultRequest getDefaultInstance() { + private static final AbortProcedureRequest defaultInstance; + public static AbortProcedureRequest getDefaultInstance() { return defaultInstance; } - public GetProcedureResultRequest getDefaultInstanceForType() { + public AbortProcedureRequest getDefaultInstanceForType() { return defaultInstance; } @@ -54858,7 +55204,7 @@ public GetProcedureResultRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetProcedureResultRequest( + private AbortProcedureRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -54886,6 +55232,11 @@ private GetProcedureResultRequest( procId_ = input.readUInt64(); break; } + case 16: { + bitField0_ |= 0x00000002; + mayInterruptIfRunning_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -54900,28 +55251,28 @@ private GetProcedureResultRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class); } 
- public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetProcedureResultRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AbortProcedureRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetProcedureResultRequest(input, extensionRegistry); + return new AbortProcedureRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @@ -54942,8 +55293,25 @@ public long getProcId() { return procId_; } + // optional bool mayInterruptIfRunning = 2 [default = true]; + public static final int MAYINTERRUPTIFRUNNING_FIELD_NUMBER = 2; + private boolean mayInterruptIfRunning_; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean hasMayInterruptIfRunning() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean getMayInterruptIfRunning() { + return mayInterruptIfRunning_; + } + private void initFields() { procId_ = 0L; + mayInterruptIfRunning_ = true; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -54964,6 +55332,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, procId_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, mayInterruptIfRunning_); + } getUnknownFields().writeTo(output); } @@ -54977,8 +55348,12 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, procId_); } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, mayInterruptIfRunning_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } @@ -54994,10 +55369,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) obj; boolean result = true; result = result && (hasProcId() == other.hasProcId()); @@ -55005,6 +55380,11 @@ public boolean equals(final java.lang.Object obj) { result = result && (getProcId() == other.getProcId()); } + result = result && (hasMayInterruptIfRunning() == other.hasMayInterruptIfRunning()); + if (hasMayInterruptIfRunning()) { + result = result && (getMayInterruptIfRunning() + == other.getMayInterruptIfRunning()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -55022,58 +55402,62 @@ public int hashCode() { hash = (37 * hash) + 
PROC_ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getProcId()); } + if (hasMayInterruptIfRunning()) { + hash = (37 * hash) + MAYINTERRUPTIFRUNNING_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getMayInterruptIfRunning()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( com.google.protobuf.CodedInputStream 
input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -55082,7 +55466,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedu public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -55094,24 +55478,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetProcedureResultRequest} + * Protobuf type {@code hbase.pb.AbortProcedureRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -55133,6 +55517,8 @@ public Builder clear() { super.clear(); procId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); + mayInterruptIfRunning_ = true; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -55142,48 +55528,55 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; } - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.procId_ = procId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.mayInterruptIfRunning_ = mayInterruptIfRunning_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance()) return this; if (other.hasProcId()) { setProcId(other.getProcId()); } + if (other.hasMayInterruptIfRunning()) { + setMayInterruptIfRunning(other.getMayInterruptIfRunning()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -55200,11 +55593,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest 
parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -55248,93 +55641,82 @@ public Builder clearProcId() { return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetProcedureResultRequest) + // optional bool mayInterruptIfRunning = 2 [default = true]; + private boolean mayInterruptIfRunning_ = true; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean hasMayInterruptIfRunning() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean getMayInterruptIfRunning() { + return mayInterruptIfRunning_; + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public Builder setMayInterruptIfRunning(boolean value) { + bitField0_ |= 0x00000002; + mayInterruptIfRunning_ = value; + onChanged(); + return this; + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public Builder clearMayInterruptIfRunning() { + bitField0_ = (bitField0_ & ~0x00000002); + mayInterruptIfRunning_ = true; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AbortProcedureRequest) } static { - defaultInstance = new GetProcedureResultRequest(true); + defaultInstance = new AbortProcedureRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetProcedureResultRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.AbortProcedureRequest) } - public interface GetProcedureResultResponseOrBuilder + public interface AbortProcedureResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.GetProcedureResultResponse.State state = 1; - /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; - */ - boolean hasState(); - /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState(); - - // optional uint64 start_time = 2; - /** - * optional uint64 start_time = 2; - */ - boolean hasStartTime(); - /** - * optional uint64 start_time = 2; - */ - long getStartTime(); - - // optional uint64 last_update = 3; - /** - * optional uint64 last_update = 3; - */ - boolean hasLastUpdate(); - /** - * optional uint64 last_update = 3; - */ - long getLastUpdate(); - - // optional bytes result = 4; - /** - * optional bytes result = 4; - */ - boolean hasResult(); - /** - * optional bytes result = 4; - */ - com.google.protobuf.ByteString getResult(); - - // optional .hbase.pb.ForeignExceptionMessage exception = 5; - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - boolean hasException(); + // required bool is_procedure_aborted = 1; /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; + * required bool is_procedure_aborted = 1; */ - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException(); + boolean hasIsProcedureAborted(); /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; + * required bool is_procedure_aborted = 1; */ - 
org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder(); + boolean getIsProcedureAborted(); } /** - * Protobuf type {@code hbase.pb.GetProcedureResultResponse} + * Protobuf type {@code hbase.pb.AbortProcedureResponse} */ - public static final class GetProcedureResultResponse extends + public static final class AbortProcedureResponse extends com.google.protobuf.GeneratedMessage - implements GetProcedureResultResponseOrBuilder { - // Use GetProcedureResultResponse.newBuilder() to construct. - private GetProcedureResultResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements AbortProcedureResponseOrBuilder { + // Use AbortProcedureResponse.newBuilder() to construct. + private AbortProcedureResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetProcedureResultResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private AbortProcedureResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetProcedureResultResponse defaultInstance; - public static GetProcedureResultResponse getDefaultInstance() { + private static final AbortProcedureResponse defaultInstance; + public static AbortProcedureResponse getDefaultInstance() { return defaultInstance; } - public GetProcedureResultResponse getDefaultInstanceForType() { + public AbortProcedureResponse getDefaultInstanceForType() { return defaultInstance; } @@ -55344,7 +55726,7 @@ public GetProcedureResultResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetProcedureResultResponse( + private AbortProcedureResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -55368,42 +55750,8 @@ private GetProcedureResultResponse( break; } case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - state_ = value; - } - break; - } - case 16: { - bitField0_ |= 0x00000002; - startTime_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - lastUpdate_ = input.readUInt64(); - break; - } - case 34: { - bitField0_ |= 0x00000008; - result_ = input.readBytes(); - break; - } - case 42: { - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder subBuilder = null; - if (((bitField0_ & 0x00000010) == 0x00000010)) { - subBuilder = exception_.toBuilder(); - } - exception_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(exception_); - exception_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000010; + bitField0_ |= 0x00000001; + isProcedureAborted_ = input.readBool(); break; } } @@ -55420,222 +55768,57 @@ private GetProcedureResultResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetProcedureResultResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AbortProcedureResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetProcedureResultResponse(input, extensionRegistry); + return new AbortProcedureResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - /** - * Protobuf enum {@code hbase.pb.GetProcedureResultResponse.State} - */ - public enum State - implements com.google.protobuf.ProtocolMessageEnum { - /** - * NOT_FOUND = 0; - */ - NOT_FOUND(0, 0), - /** - * RUNNING = 1; - */ - RUNNING(1, 1), - /** - * FINISHED = 2; - */ - FINISHED(2, 2), - ; - - /** - * NOT_FOUND = 0; - */ - public static final int NOT_FOUND_VALUE = 0; - /** - * RUNNING = 1; - */ - public static final int RUNNING_VALUE = 1; - /** - * FINISHED = 2; - */ - public static final int FINISHED_VALUE = 2; - - - public final int getNumber() { return value; } - - public static State valueOf(int value) { - switch (value) { - case 0: return NOT_FOUND; - case 1: return RUNNING; - case 2: return FINISHED; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public State findValueByNumber(int number) { - return State.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDescriptor().getEnumTypes().get(0); - } - - private static final State[] VALUES = values(); - - public static State valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw 
new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private State(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:hbase.pb.GetProcedureResultResponse.State) - } - private int bitField0_; - // required .hbase.pb.GetProcedureResultResponse.State state = 1; - public static final int STATE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_; + // required bool is_procedure_aborted = 1; + public static final int IS_PROCEDURE_ABORTED_FIELD_NUMBER = 1; + private boolean isProcedureAborted_; /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * required bool is_procedure_aborted = 1; */ - public boolean hasState() { + public boolean hasIsProcedureAborted() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { - return state_; - } - - // optional uint64 start_time = 2; - public static final int START_TIME_FIELD_NUMBER = 2; - private long startTime_; - /** - * optional uint64 start_time = 2; - */ - public boolean hasStartTime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional uint64 start_time = 2; - */ - public long getStartTime() { - return startTime_; - } - - // optional uint64 last_update = 3; - public static final int LAST_UPDATE_FIELD_NUMBER = 3; - private long lastUpdate_; - /** - * optional uint64 last_update = 3; - */ - public boolean hasLastUpdate() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional uint64 last_update = 3; - */ - public long getLastUpdate() { - return lastUpdate_; - } - - // optional bytes result = 4; - public static final int RESULT_FIELD_NUMBER = 4; - private com.google.protobuf.ByteString result_; - /** - * optional bytes result = 4; - */ - public boolean hasResult() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bytes result = 4; - */ - public com.google.protobuf.ByteString getResult() { - return result_; - } - - // optional .hbase.pb.ForeignExceptionMessage exception = 5; - public static final int EXCEPTION_FIELD_NUMBER = 5; - private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_; - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public boolean hasException() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { - return exception_; - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; + * required bool is_procedure_aborted = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { - return exception_; + public boolean getIsProcedureAborted() { + return isProcedureAborted_; } private void initFields() { - state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; - startTime_ = 0L; - lastUpdate_ = 0L; - result_ = com.google.protobuf.ByteString.EMPTY; - exception_ = 
org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + isProcedureAborted_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasState()) { + if (!hasIsProcedureAborted()) { memoizedIsInitialized = 0; return false; } @@ -55647,19 +55830,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, state_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, startTime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, lastUpdate_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(4, result_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeMessage(5, exception_); + output.writeBool(1, isProcedureAborted_); } getUnknownFields().writeTo(output); } @@ -55672,23 +55843,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, state_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, startTime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, lastUpdate_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, result_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, exception_); + .computeBoolSize(1, isProcedureAborted_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -55707,36 +55862,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) obj; boolean result = true; - result = result && (hasState() == other.hasState()); - if (hasState()) { - result = result && - (getState() == other.getState()); - } - result = result && (hasStartTime() == other.hasStartTime()); - if (hasStartTime()) { - result = result && (getStartTime() - == other.getStartTime()); - } - result = result && (hasLastUpdate() == other.hasLastUpdate()); - if (hasLastUpdate()) { - result = result && (getLastUpdate() - == other.getLastUpdate()); - } - result = result && (hasResult() == other.hasResult()); - if (hasResult()) { - result = result && getResult() - .equals(other.getResult()); - } - result = result && (hasException() == other.hasException()); - if (hasException()) { - result = result && getException() - .equals(other.getException()); + result = result && (hasIsProcedureAborted() == 
other.hasIsProcedureAborted()); + if (hasIsProcedureAborted()) { + result = result && (getIsProcedureAborted() + == other.getIsProcedureAborted()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -55751,78 +55886,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasState()) { - hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getState()); - } - if (hasStartTime()) { - hash = (37 * hash) + START_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getStartTime()); - } - if (hasLastUpdate()) { - hash = (37 * hash) + LAST_UPDATE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLastUpdate()); - } - if (hasResult()) { - hash = (37 * hash) + RESULT_FIELD_NUMBER; - hash = (53 * hash) + getResult().hashCode(); - } - if (hasException()) { - hash = (37 * hash) + EXCEPTION_FIELD_NUMBER; - hash = (53 * hash) + getException().hashCode(); + if (hasIsProcedureAborted()) { + hash = (37 * hash) + IS_PROCEDURE_ABORTED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsProcedureAborted()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom(java.io.InputStream 
input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -55831,7 +55950,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedu public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -55843,24 +55962,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetProcedureResultResponse} + * Protobuf type {@code hbase.pb.AbortProcedureResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class); } - // Construct using 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -55872,7 +55991,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getExceptionFieldBuilder(); } } private static Builder create() { @@ -55881,20 +55999,8 @@ private static Builder create() { public Builder clear() { super.clear(); - state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + isProcedureAborted_ = false; bitField0_ = (bitField0_ & ~0x00000001); - startTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - lastUpdate_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - result_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000008); - if (exceptionBuilder_ == null) { - exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); - } else { - exceptionBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -55904,86 +56010,54 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetProcedureResultResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.state_ = state_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.startTime_ = startTime_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - 
to_bitField0_ |= 0x00000004; - } - result.lastUpdate_ = lastUpdate_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.result_ = result_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - if (exceptionBuilder_ == null) { - result.exception_ = exception_; - } else { - result.exception_ = exceptionBuilder_.build(); - } + result.isProcedureAborted_ = isProcedureAborted_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()) return this; - if (other.hasState()) { - setState(other.getState()); - } - if (other.hasStartTime()) { - setStartTime(other.getStartTime()); - } - if (other.hasLastUpdate()) { - setLastUpdate(other.getLastUpdate()); - } - if (other.hasResult()) { - setResult(other.getResult()); - } - if (other.hasException()) { - mergeException(other.getException()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()) return this; + if (other.hasIsProcedureAborted()) { + setIsProcedureAborted(other.getIsProcedureAborted()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasState()) { + if (!hasIsProcedureAborted()) { return false; } @@ -55994,11 +56068,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -56009,314 +56083,72 @@ public Builder mergeFrom( } private int bitField0_; - // required .hbase.pb.GetProcedureResultResponse.State state = 1; - private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + // required bool is_procedure_aborted = 1; + private boolean isProcedureAborted_ ; /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * required bool 
is_procedure_aborted = 1; */ - public boolean hasState() { + public boolean hasIsProcedureAborted() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * required bool is_procedure_aborted = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { - return state_; + public boolean getIsProcedureAborted() { + return isProcedureAborted_; } /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * required bool is_procedure_aborted = 1; */ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value) { - if (value == null) { - throw new NullPointerException(); - } + public Builder setIsProcedureAborted(boolean value) { bitField0_ |= 0x00000001; - state_ = value; + isProcedureAborted_ = value; onChanged(); return this; } /** - * required .hbase.pb.GetProcedureResultResponse.State state = 1; + * required bool is_procedure_aborted = 1; */ - public Builder clearState() { + public Builder clearIsProcedureAborted() { bitField0_ = (bitField0_ & ~0x00000001); - state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + isProcedureAborted_ = false; onChanged(); return this; } - // optional uint64 start_time = 2; - private long startTime_ ; - /** - * optional uint64 start_time = 2; - */ - public boolean hasStartTime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional uint64 start_time = 2; - */ - public long getStartTime() { - return startTime_; - } - /** - * optional uint64 start_time = 2; - */ - public Builder setStartTime(long value) { - bitField0_ |= 0x00000002; - startTime_ = value; - onChanged(); - return this; - } - /** - * optional uint64 start_time = 2; - */ - public Builder clearStartTime() { - bitField0_ = (bitField0_ & ~0x00000002); - startTime_ = 0L; - onChanged(); - return this; - } + // @@protoc_insertion_point(builder_scope:hbase.pb.AbortProcedureResponse) + } - // optional uint64 last_update = 3; - private long lastUpdate_ ; - /** - * optional uint64 last_update = 3; - */ - public boolean hasLastUpdate() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional uint64 last_update = 3; - */ - public long getLastUpdate() { - return lastUpdate_; - } - /** - * optional uint64 last_update = 3; - */ - public Builder setLastUpdate(long value) { - bitField0_ |= 0x00000004; - lastUpdate_ = value; - onChanged(); - return this; - } - /** - * optional uint64 last_update = 3; - */ - public Builder clearLastUpdate() { - bitField0_ = (bitField0_ & ~0x00000004); - lastUpdate_ = 0L; - onChanged(); - return this; - } + static { + defaultInstance = new AbortProcedureResponse(true); + defaultInstance.initFields(); + } - // optional bytes result = 4; - private com.google.protobuf.ByteString result_ = com.google.protobuf.ByteString.EMPTY; - /** - * optional bytes result = 4; - */ - public boolean hasResult() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bytes result = 4; - */ - public com.google.protobuf.ByteString getResult() { - return result_; - } - /** - * optional bytes result = 4; - */ - public Builder setResult(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - result_ = value; - onChanged(); - return this; - } - /** - * optional bytes result = 4; - */ - public Builder clearResult() { 
- bitField0_ = (bitField0_ & ~0x00000008); - result_ = getDefaultInstance().getResult(); - onChanged(); - return this; - } - - // optional .hbase.pb.ForeignExceptionMessage exception = 5; - private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> exceptionBuilder_; - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public boolean hasException() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { - if (exceptionBuilder_ == null) { - return exception_; - } else { - return exceptionBuilder_.getMessage(); - } - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public Builder setException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { - if (exceptionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - exception_ = value; - onChanged(); - } else { - exceptionBuilder_.setMessage(value); - } - bitField0_ |= 0x00000010; - return this; - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public Builder setException( - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder builderForValue) { - if (exceptionBuilder_ == null) { - exception_ = builderForValue.build(); - onChanged(); - } else { - exceptionBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000010; - return this; - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public Builder mergeException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { - if (exceptionBuilder_ == null) { - if (((bitField0_ & 0x00000010) == 0x00000010) && - exception_ != org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance()) { - exception_ = - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.newBuilder(exception_).mergeFrom(value).buildPartial(); - } else { - exception_ = value; - } - onChanged(); - } else { - exceptionBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000010; - return this; - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public Builder clearException() { - if (exceptionBuilder_ == null) { - exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); - onChanged(); - } else { - exceptionBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder getExceptionBuilder() { - bitField0_ |= 0x00000010; - onChanged(); - return getExceptionFieldBuilder().getBuilder(); - } - /** - * optional .hbase.pb.ForeignExceptionMessage 
exception = 5; - */ - public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { - if (exceptionBuilder_ != null) { - return exceptionBuilder_.getMessageOrBuilder(); - } else { - return exception_; - } - } - /** - * optional .hbase.pb.ForeignExceptionMessage exception = 5; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> - getExceptionFieldBuilder() { - if (exceptionBuilder_ == null) { - exceptionBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>( - exception_, - getParentForChildren(), - isClean()); - exception_ = null; - } - return exceptionBuilder_; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.GetProcedureResultResponse) - } - - static { - defaultInstance = new GetProcedureResultResponse(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.GetProcedureResultResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.AbortProcedureResponse) } - public interface AbortProcedureRequestOrBuilder + public interface ListProceduresRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - - // required uint64 proc_id = 1; - /** - * required uint64 proc_id = 1; - */ - boolean hasProcId(); - /** - * required uint64 proc_id = 1; - */ - long getProcId(); - - // optional bool mayInterruptIfRunning = 2 [default = true]; - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - boolean hasMayInterruptIfRunning(); - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - boolean getMayInterruptIfRunning(); } /** - * Protobuf type {@code hbase.pb.AbortProcedureRequest} + * Protobuf type {@code hbase.pb.ListProceduresRequest} */ - public static final class AbortProcedureRequest extends + public static final class ListProceduresRequest extends com.google.protobuf.GeneratedMessage - implements AbortProcedureRequestOrBuilder { - // Use AbortProcedureRequest.newBuilder() to construct. - private AbortProcedureRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ListProceduresRequestOrBuilder { + // Use ListProceduresRequest.newBuilder() to construct. 
+ private ListProceduresRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private AbortProcedureRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ListProceduresRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final AbortProcedureRequest defaultInstance; - public static AbortProcedureRequest getDefaultInstance() { + private static final ListProceduresRequest defaultInstance; + public static ListProceduresRequest getDefaultInstance() { return defaultInstance; } - public AbortProcedureRequest getDefaultInstanceForType() { + public ListProceduresRequest getDefaultInstanceForType() { return defaultInstance; } @@ -56326,12 +56158,11 @@ public AbortProcedureRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private AbortProcedureRequest( + private ListProceduresRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -56349,16 +56180,6 @@ private AbortProcedureRequest( } break; } - case 8: { - bitField0_ |= 0x00000001; - procId_ = input.readUInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - mayInterruptIfRunning_ = input.readBool(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -56373,77 +56194,38 @@ private AbortProcedureRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public AbortProcedureRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListProceduresRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new AbortProcedureRequest(input, extensionRegistry); + return new ListProceduresRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { 
return PARSER; } - private int bitField0_; - // required uint64 proc_id = 1; - public static final int PROC_ID_FIELD_NUMBER = 1; - private long procId_; - /** - * required uint64 proc_id = 1; - */ - public boolean hasProcId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 proc_id = 1; - */ - public long getProcId() { - return procId_; - } - - // optional bool mayInterruptIfRunning = 2 [default = true]; - public static final int MAYINTERRUPTIFRUNNING_FIELD_NUMBER = 2; - private boolean mayInterruptIfRunning_; - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public boolean hasMayInterruptIfRunning() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public boolean getMayInterruptIfRunning() { - return mayInterruptIfRunning_; - } - private void initFields() { - procId_ = 0L; - mayInterruptIfRunning_ = true; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasProcId()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -56451,12 +56233,6 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, procId_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, mayInterruptIfRunning_); - } getUnknownFields().writeTo(output); } @@ -56466,14 +56242,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, procId_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, mayInterruptIfRunning_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -56491,22 +56259,12 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) obj; boolean result = true; - result = result && (hasProcId() == other.hasProcId()); - if (hasProcId()) { - result = result && (getProcId() - == other.getProcId()); - } - result = result && (hasMayInterruptIfRunning() == other.hasMayInterruptIfRunning()); - if (hasMayInterruptIfRunning()) { - result = result && (getMayInterruptIfRunning() - == other.getMayInterruptIfRunning()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -56520,66 +56278,58 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasProcId()) { - hash = (37 * hash) + PROC_ID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getProcId()); - } - if 
(hasMayInterruptIfRunning()) { - hash = (37 * hash) + MAYINTERRUPTIFRUNNING_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getMayInterruptIfRunning()); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -56588,7 +56338,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProce public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -56600,24 +56350,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.AbortProcedureRequest} + * Protobuf type {@code hbase.pb.ListProceduresRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -56637,10 +56387,6 @@ private static Builder create() { public Builder clear() { super.clear(); - procId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - mayInterruptIfRunning_ = true; - bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -56650,64 +56396,43 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest getDefaultInstanceForType() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.procId_ = procId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.mayInterruptIfRunning_ = mayInterruptIfRunning_; - result.bitField0_ = to_bitField0_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance()) return this; - if (other.hasProcId()) { - setProcId(other.getProcId()); - } - if (other.hasMayInterruptIfRunning()) { - setMayInterruptIfRunning(other.getMayInterruptIfRunning()); - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasProcId()) { - - return false; - } return true; } @@ -56715,11 +56440,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parsedMessage = null; try { parsedMessage = 
PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -56728,117 +56453,65 @@ public Builder mergeFrom( } return this; } - private int bitField0_; - - // required uint64 proc_id = 1; - private long procId_ ; - /** - * required uint64 proc_id = 1; - */ - public boolean hasProcId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 proc_id = 1; - */ - public long getProcId() { - return procId_; - } - /** - * required uint64 proc_id = 1; - */ - public Builder setProcId(long value) { - bitField0_ |= 0x00000001; - procId_ = value; - onChanged(); - return this; - } - /** - * required uint64 proc_id = 1; - */ - public Builder clearProcId() { - bitField0_ = (bitField0_ & ~0x00000001); - procId_ = 0L; - onChanged(); - return this; - } - - // optional bool mayInterruptIfRunning = 2 [default = true]; - private boolean mayInterruptIfRunning_ = true; - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public boolean hasMayInterruptIfRunning() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public boolean getMayInterruptIfRunning() { - return mayInterruptIfRunning_; - } - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public Builder setMayInterruptIfRunning(boolean value) { - bitField0_ |= 0x00000002; - mayInterruptIfRunning_ = value; - onChanged(); - return this; - } - /** - * optional bool mayInterruptIfRunning = 2 [default = true]; - */ - public Builder clearMayInterruptIfRunning() { - bitField0_ = (bitField0_ & ~0x00000002); - mayInterruptIfRunning_ = true; - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:hbase.pb.AbortProcedureRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.ListProceduresRequest) } static { - defaultInstance = new AbortProcedureRequest(true); + defaultInstance = new ListProceduresRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.AbortProcedureRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.ListProceduresRequest) } - public interface AbortProcedureResponseOrBuilder + public interface ListProceduresResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bool is_procedure_aborted = 1; + // repeated .hbase.pb.Procedure procedure = 1; /** - * required bool is_procedure_aborted = 1; + * repeated .hbase.pb.Procedure procedure = 1; */ - boolean hasIsProcedureAborted(); + java.util.List + getProcedureList(); /** - * required bool is_procedure_aborted = 1; + * repeated .hbase.pb.Procedure procedure = 1; */ - boolean getIsProcedureAborted(); + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + int getProcedureCount(); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + java.util.List + getProcedureOrBuilderList(); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index); } /** - * Protobuf type {@code 
hbase.pb.AbortProcedureResponse} + * Protobuf type {@code hbase.pb.ListProceduresResponse} */ - public static final class AbortProcedureResponse extends + public static final class ListProceduresResponse extends com.google.protobuf.GeneratedMessage - implements AbortProcedureResponseOrBuilder { - // Use AbortProcedureResponse.newBuilder() to construct. - private AbortProcedureResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ListProceduresResponseOrBuilder { + // Use ListProceduresResponse.newBuilder() to construct. + private ListProceduresResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private AbortProcedureResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ListProceduresResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final AbortProcedureResponse defaultInstance; - public static AbortProcedureResponse getDefaultInstance() { + private static final ListProceduresResponse defaultInstance; + public static ListProceduresResponse getDefaultInstance() { return defaultInstance; } - public AbortProcedureResponse getDefaultInstanceForType() { + public ListProceduresResponse getDefaultInstanceForType() { return defaultInstance; } @@ -56848,7 +56521,7 @@ public AbortProcedureResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private AbortProcedureResponse( + private ListProceduresResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -56871,9 +56544,12 @@ private AbortProcedureResponse( } break; } - case 8: { - bitField0_ |= 0x00000001; - isProcedureAborted_ = input.readBool(); + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + procedure_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + procedure_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.PARSER, extensionRegistry)); break; } } @@ -56884,65 +56560,89 @@ private AbortProcedureResponse( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + procedure_ = java.util.Collections.unmodifiableList(procedure_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class); + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public AbortProcedureResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListProceduresResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new AbortProcedureResponse(input, extensionRegistry); + return new ListProceduresResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required bool is_procedure_aborted = 1; - public static final int IS_PROCEDURE_ABORTED_FIELD_NUMBER = 1; - private boolean isProcedureAborted_; + // repeated .hbase.pb.Procedure procedure = 1; + public static final int PROCEDURE_FIELD_NUMBER = 1; + private java.util.List procedure_; /** - * required bool is_procedure_aborted = 1; + * repeated .hbase.pb.Procedure procedure = 1; */ - public boolean hasIsProcedureAborted() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getProcedureList() { + return procedure_; } /** - * required bool is_procedure_aborted = 1; + * repeated .hbase.pb.Procedure procedure = 1; */ - public boolean getIsProcedureAborted() { - return isProcedureAborted_; + public java.util.List + getProcedureOrBuilderList() { + return procedure_; + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public int getProcedureCount() { + return procedure_.size(); + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { + return procedure_.get(index); + } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index) { + return procedure_.get(index); } private void initFields() { - isProcedureAborted_ = false; + procedure_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasIsProcedureAborted()) { - memoizedIsInitialized = 0; - return false; + for (int i = 0; i < getProcedureCount(); i++) { + if (!getProcedure(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -56951,8 +56651,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, isProcedureAborted_); + for (int i = 0; i < procedure_.size(); i++) { + output.writeMessage(1, procedure_.get(i)); } getUnknownFields().writeTo(output); } @@ -56963,9 +56663,9 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + for (int i = 0; i < procedure_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, isProcedureAborted_); + 
.computeMessageSize(1, procedure_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -56984,17 +56684,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) obj; boolean result = true; - result = result && (hasIsProcedureAborted() == other.hasIsProcedureAborted()); - if (hasIsProcedureAborted()) { - result = result && (getIsProcedureAborted() - == other.getIsProcedureAborted()); - } + result = result && getProcedureList() + .equals(other.getProcedureList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -57008,62 +56705,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasIsProcedureAborted()) { - hash = (37 * hash) + IS_PROCEDURE_ABORTED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getIsProcedureAborted()); + if (getProcedureCount() > 0) { + hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; + hash = (53 * hash) + getProcedureList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -57072,7 +56769,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProce public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -57084,24 +56781,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.AbortProcedureResponse} + * Protobuf type {@code hbase.pb.ListProceduresResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -57113,6 +56810,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getProcedureFieldBuilder(); } } private static Builder create() { @@ -57121,8 +56819,12 @@ private static Builder create() { public Builder clear() { super.clear(); - isProcedureAborted_ = false; - bitField0_ = (bitField0_ & ~0x00000001); + if (procedureBuilder_ == null) { + procedure_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + procedureBuilder_.clear(); + } return this; } @@ -57132,56 +56834,84 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + if (procedureBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + procedure_ = 
java.util.Collections.unmodifiableList(procedure_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.procedure_ = procedure_; + } else { + result.procedure_ = procedureBuilder_.build(); } - result.isProcedureAborted_ = isProcedureAborted_; - result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()) return this; - if (other.hasIsProcedureAborted()) { - setIsProcedureAborted(other.getIsProcedureAborted()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()) return this; + if (procedureBuilder_ == null) { + if (!other.procedure_.isEmpty()) { + if (procedure_.isEmpty()) { + procedure_ = other.procedure_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureProcedureIsMutable(); + procedure_.addAll(other.procedure_); + } + onChanged(); + } + } else { + if (!other.procedure_.isEmpty()) { + if (procedureBuilder_.isEmpty()) { + procedureBuilder_.dispose(); + procedureBuilder_ = null; + procedure_ = other.procedure_; + bitField0_ = (bitField0_ & ~0x00000001); + procedureBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getProcedureFieldBuilder() : null; + } else { + procedureBuilder_.addAllMessages(other.procedure_); + } + } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasIsProcedureAborted()) { - - return false; + for (int i = 0; i < getProcedureCount(); i++) { + if (!getProcedure(i).isInitialized()) { + + return false; + } } return true; } @@ -57190,11 +56920,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -57205,435 +56935,372 @@ public Builder mergeFrom( } private int bitField0_; - // required bool is_procedure_aborted = 1; - private boolean isProcedureAborted_ ; + // repeated .hbase.pb.Procedure procedure = 1; + private java.util.List procedure_ = + java.util.Collections.emptyList(); + private void ensureProcedureIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + procedure_ = new java.util.ArrayList(procedure_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> procedureBuilder_; + /** - * required bool is_procedure_aborted = 1; + * repeated .hbase.pb.Procedure procedure = 1; */ - public boolean hasIsProcedureAborted() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getProcedureList() { + if (procedureBuilder_ == null) { + return java.util.Collections.unmodifiableList(procedure_); + } else { + return procedureBuilder_.getMessageList(); + } } /** - * required bool is_procedure_aborted = 1; + * repeated .hbase.pb.Procedure procedure = 1; */ - public boolean getIsProcedureAborted() { - return isProcedureAborted_; + public int getProcedureCount() { + if (procedureBuilder_ == null) { + return procedure_.size(); + } else { + return procedureBuilder_.getCount(); + } } /** - * required bool is_procedure_aborted = 1; + * repeated .hbase.pb.Procedure procedure = 1; */ - public Builder setIsProcedureAborted(boolean value) { - bitField0_ |= 0x00000001; - isProcedureAborted_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { + if (procedureBuilder_ == null) { + return procedure_.get(index); + } else { + return procedureBuilder_.getMessage(index); + } } /** - * required bool is_procedure_aborted = 1; + * repeated .hbase.pb.Procedure procedure = 1; */ - public Builder clearIsProcedureAborted() { - bitField0_ = (bitField0_ & ~0x00000001); - isProcedureAborted_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.AbortProcedureResponse) - } - - static 
{ - defaultInstance = new AbortProcedureResponse(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.AbortProcedureResponse) - } - - public interface ListProceduresRequestOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - /** - * Protobuf type {@code hbase.pb.ListProceduresRequest} - */ - public static final class ListProceduresRequest extends - com.google.protobuf.GeneratedMessage - implements ListProceduresRequestOrBuilder { - // Use ListProceduresRequest.newBuilder() to construct. - private ListProceduresRequest(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ListProceduresRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ListProceduresRequest defaultInstance; - public static ListProceduresRequest getDefaultInstance() { - return defaultInstance; - } - - public ListProceduresRequest getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ListProceduresRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } + public Builder setProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureProcedureIsMutable(); + procedure_.set(index, value); + onChanged(); + } else { + procedureBuilder_.setMessage(index, value); } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ListProceduresRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return new ListProceduresRequest(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, 
extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.ListProceduresRequest} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; + return this; } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.Builder.class); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder setProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.set(index, builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.setMessage(index, builderForValue.build()); + } + return this; } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder addProcedure(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.add(value); + onChanged(); + } else { + procedureBuilder_.addMessage(value); + } + return 
this; } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder addProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.add(index, value); + onChanged(); + } else { + procedureBuilder_.addMessage(index, value); + } + return this; } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder addProcedure( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.add(builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.addMessage(builderForValue.build()); } + return this; } - private static Builder create() { - return new Builder(); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder addProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.add(index, builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.addMessage(index, builderForValue.build()); + } + return this; } - - public Builder clear() { - super.clear(); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder addAllProcedure( + java.lang.Iterable values) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + super.addAll(values, procedure_); + onChanged(); + } else { + procedureBuilder_.addAllMessages(values); + } return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder clearProcedure() { + if (procedureBuilder_ == null) { + procedure_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + procedureBuilder_.clear(); + } + return this; } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresRequest_descriptor; + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public Builder removeProcedure(int index) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.remove(index); + onChanged(); + } else { + procedureBuilder_.remove(index); + } + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder getProcedureBuilder( + int index) { + return getProcedureFieldBuilder().getBuilder(index); } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); 
+ /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index) { + if (procedureBuilder_ == null) { + return procedure_.get(index); } else { + return procedureBuilder_.getMessageOrBuilder(index); } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest(this); - onBuilt(); - return result; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)other); + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public java.util.List + getProcedureOrBuilderList() { + if (procedureBuilder_ != null) { + return procedureBuilder_.getMessageOrBuilderList(); } else { - super.mergeFrom(other); - return this; + return java.util.Collections.unmodifiableList(procedure_); } } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder() { + return getProcedureFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); } - - public final boolean isInitialized() { - return true; + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder( + int index) { + return getProcedureFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } + /** + * repeated .hbase.pb.Procedure procedure = 1; + */ + public java.util.List + getProcedureBuilderList() { + return getProcedureFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> + getProcedureFieldBuilder() { + if (procedureBuilder_ == null) { + procedureBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder>( + procedure_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + procedure_ = null; } - return this; + return procedureBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.ListProceduresRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.ListProceduresResponse) + } + + static { + defaultInstance = new ListProceduresResponse(true); + defaultInstance.initFields(); } - static { - defaultInstance = new ListProceduresRequest(true); - defaultInstance.initFields(); - } + // @@protoc_insertion_point(class_scope:hbase.pb.ListProceduresResponse) + } + + public interface SetQuotaRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string user_name = 1; + /** + * optional string user_name = 1; + */ + boolean hasUserName(); + /** + * optional string user_name = 1; + */ + java.lang.String getUserName(); + /** + * optional string user_name = 1; + */ + com.google.protobuf.ByteString + getUserNameBytes(); + + // optional string user_group = 2; + /** + * optional string user_group = 2; + */ + boolean hasUserGroup(); + /** + * optional string user_group = 2; + */ + java.lang.String getUserGroup(); + /** + * optional string user_group = 2; + */ + com.google.protobuf.ByteString + getUserGroupBytes(); + + // optional string namespace = 3; + /** + * optional string namespace = 3; + */ + boolean hasNamespace(); + /** + * optional string namespace = 3; + */ + java.lang.String getNamespace(); + /** + * optional string namespace = 3; + */ + com.google.protobuf.ByteString + getNamespaceBytes(); - // @@protoc_insertion_point(class_scope:hbase.pb.ListProceduresRequest) - } + // optional .hbase.pb.TableName table_name = 4; + /** + * optional .hbase.pb.TableName table_name = 4; + */ + boolean hasTableName(); + /** + * optional .hbase.pb.TableName table_name = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); + /** + * optional .hbase.pb.TableName table_name = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); - public interface ListProceduresResponseOrBuilder - extends com.google.protobuf.MessageOrBuilder { + // optional bool remove_all = 5; + /** + * optional bool remove_all = 5; + */ + boolean hasRemoveAll(); + /** + * optional bool remove_all = 5; + */ + boolean getRemoveAll(); - // repeated .hbase.pb.Procedure procedure = 1; + // optional bool bypass_globals = 6; /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional bool bypass_globals = 6; */ - java.util.List - getProcedureList(); + boolean hasBypassGlobals(); /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional bool bypass_globals = 6; */ - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index); + boolean getBypassGlobals(); + + // optional .hbase.pb.ThrottleRequest throttle = 7; /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - int getProcedureCount(); + boolean hasThrottle(); /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - java.util.List - getProcedureOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle(); /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest 
throttle = 7; */ - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( - int index); + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder(); } /** - * Protobuf type {@code hbase.pb.ListProceduresResponse} + * Protobuf type {@code hbase.pb.SetQuotaRequest} */ - public static final class ListProceduresResponse extends + public static final class SetQuotaRequest extends com.google.protobuf.GeneratedMessage - implements ListProceduresResponseOrBuilder { - // Use ListProceduresResponse.newBuilder() to construct. - private ListProceduresResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements SetQuotaRequestOrBuilder { + // Use SetQuotaRequest.newBuilder() to construct. + private SetQuotaRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ListProceduresResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private SetQuotaRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ListProceduresResponse defaultInstance; - public static ListProceduresResponse getDefaultInstance() { + private static final SetQuotaRequest defaultInstance; + public static SetQuotaRequest getDefaultInstance() { return defaultInstance; } - public ListProceduresResponse getDefaultInstanceForType() { + public SetQuotaRequest getDefaultInstanceForType() { return defaultInstance; } @@ -57643,7 +57310,7 @@ public ListProceduresResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ListProceduresResponse( + private SetQuotaRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -57667,11 +57334,54 @@ private ListProceduresResponse( break; } case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - procedure_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; + bitField0_ |= 0x00000001; + userName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + userGroup_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + namespace_ = input.readBytes(); + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = tableName_.toBuilder(); } - procedure_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.PARSER, extensionRegistry)); + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 40: { + bitField0_ |= 0x00000010; + removeAll_ = input.readBool(); + break; + } + case 48: { + bitField0_ |= 0x00000020; + bypassGlobals_ = input.readBool(); + break; + } + case 58: { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder subBuilder = null; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + subBuilder = throttle_.toBuilder(); + } + throttle_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.PARSER, extensionRegistry); 
+ if (subBuilder != null) { + subBuilder.mergeFrom(throttle_); + throttle_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000040; break; } } @@ -57682,86 +57392,265 @@ private ListProceduresResponse( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - procedure_ = java.util.Collections.unmodifiableList(procedure_); + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SetQuotaRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SetQuotaRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string user_name = 1; + public static final int USER_NAME_FIELD_NUMBER = 1; + private java.lang.Object userName_; + /** + * optional string user_name = 1; + */ + public boolean hasUserName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string user_name = 1; + */ + public java.lang.String getUserName() { + java.lang.Object ref = userName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + userName_ = s; + } + return s; + } + } + /** + * optional string user_name = 1; + */ + public com.google.protobuf.ByteString + getUserNameBytes() { + java.lang.Object ref = userName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string user_group = 2; + public static final int USER_GROUP_FIELD_NUMBER = 2; + private java.lang.Object userGroup_; + /** + * optional string user_group = 2; + */ + public boolean hasUserGroup() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string user_group = 2; + */ + public java.lang.String getUserGroup() { + java.lang.Object ref = userGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + userGroup_ = s; + } + return s; + } + } + /** + * optional string user_group = 2; + */ + public com.google.protobuf.ByteString + getUserGroupBytes() { + java.lang.Object ref = 
userGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string namespace = 3; + public static final int NAMESPACE_FIELD_NUMBER = 3; + private java.lang.Object namespace_; + /** + * optional string namespace = 3; + */ + public boolean hasNamespace() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string namespace = 3; + */ + public java.lang.String getNamespace() { + java.lang.Object ref = namespace_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + namespace_ = s; } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); + return s; } } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; + /** + * optional string namespace = 3; + */ + public com.google.protobuf.ByteString + getNamespaceBytes() { + java.lang.Object ref = namespace_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + namespace_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.Builder.class); + // optional .hbase.pb.TableName table_name = 4; + public static final int TABLE_NAME_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + return tableName_; + } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ListProceduresResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ListProceduresResponse(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; + // optional bool remove_all = 5; + public static final int REMOVE_ALL_FIELD_NUMBER = 5; + private boolean removeAll_; + /** + * optional bool remove_all = 5; + */ + public boolean hasRemoveAll() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool remove_all = 5; + */ + 
public boolean getRemoveAll() { + return removeAll_; } - // repeated .hbase.pb.Procedure procedure = 1; - public static final int PROCEDURE_FIELD_NUMBER = 1; - private java.util.List procedure_; + // optional bool bypass_globals = 6; + public static final int BYPASS_GLOBALS_FIELD_NUMBER = 6; + private boolean bypassGlobals_; /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional bool bypass_globals = 6; */ - public java.util.List getProcedureList() { - return procedure_; + public boolean hasBypassGlobals() { + return ((bitField0_ & 0x00000020) == 0x00000020); } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional bool bypass_globals = 6; */ - public java.util.List - getProcedureOrBuilderList() { - return procedure_; + public boolean getBypassGlobals() { + return bypassGlobals_; } + + // optional .hbase.pb.ThrottleRequest throttle = 7; + public static final int THROTTLE_FIELD_NUMBER = 7; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest throttle_; /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public int getProcedureCount() { - return procedure_.size(); + public boolean hasThrottle() { + return ((bitField0_ & 0x00000040) == 0x00000040); } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { - return procedure_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle() { + return throttle_; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( - int index) { - return procedure_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder() { + return throttle_; } private void initFields() { - procedure_ = java.util.Collections.emptyList(); + userName_ = ""; + userGroup_ = ""; + namespace_ = ""; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + removeAll_ = false; + bypassGlobals_ = false; + throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getProcedureCount(); i++) { - if (!getProcedure(i).isInitialized()) { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasThrottle()) { + if (!getThrottle().isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -57773,8 +57662,26 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < procedure_.size(); i++) { - output.writeMessage(1, procedure_.get(i)); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getUserNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getUserGroupBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getNamespaceBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + 
output.writeMessage(4, tableName_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBool(5, removeAll_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBool(6, bypassGlobals_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeMessage(7, throttle_); } getUnknownFields().writeTo(output); } @@ -57785,9 +57692,33 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - for (int i = 0; i < procedure_.size(); i++) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, procedure_.get(i)); + .computeBytesSize(1, getUserNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getUserGroupBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getNamespaceBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableName_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(5, removeAll_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(6, bypassGlobals_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, throttle_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -57806,14 +57737,47 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) obj; - - boolean result = true; - result = result && getProcedureList() - .equals(other.getProcedureList()); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) obj; + + boolean result = true; + result = result && (hasUserName() == other.hasUserName()); + if (hasUserName()) { + result = result && getUserName() + .equals(other.getUserName()); + } + result = result && (hasUserGroup() == other.hasUserGroup()); + if (hasUserGroup()) { + result = result && getUserGroup() + .equals(other.getUserGroup()); + } + result = result && (hasNamespace() == other.hasNamespace()); + if (hasNamespace()) { + result = result && getNamespace() + .equals(other.getNamespace()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasRemoveAll() == other.hasRemoveAll()); + if (hasRemoveAll()) { + result = result && (getRemoveAll() + == other.getRemoveAll()); + } + result = result && (hasBypassGlobals() == other.hasBypassGlobals()); + if (hasBypassGlobals()) { + result = result && (getBypassGlobals() + == other.getBypassGlobals()); + } + result = result && (hasThrottle() == other.hasThrottle()); + if (hasThrottle()) { + result = result && getThrottle() + .equals(other.getThrottle()); + } 
result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -57827,62 +57791,86 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getProcedureCount() > 0) { - hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; - hash = (53 * hash) + getProcedureList().hashCode(); + if (hasUserName()) { + hash = (37 * hash) + USER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getUserName().hashCode(); + } + if (hasUserGroup()) { + hash = (37 * hash) + USER_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getUserGroup().hashCode(); + } + if (hasNamespace()) { + hash = (37 * hash) + NAMESPACE_FIELD_NUMBER; + hash = (53 * hash) + getNamespace().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasRemoveAll()) { + hash = (37 * hash) + REMOVE_ALL_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getRemoveAll()); + } + if (hasBypassGlobals()) { + hash = (37 * hash) + BYPASS_GLOBALS_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getBypassGlobals()); + } + if (hasThrottle()) { + hash = (37 * hash) + THROTTLE_FIELD_NUMBER; + hash = (53 * hash) + getThrottle().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -57891,7 +57879,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProced public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -57903,526 +57891,768 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ListProceduresResponse} + * Protobuf type {@code hbase.pb.SetQuotaRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void 
maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + getThrottleFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + userName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + userGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + namespace_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + removeAll_ = false; + bitField0_ = (bitField0_ & ~0x00000010); + bypassGlobals_ = false; + bitField0_ = (bitField0_ & ~0x00000020); + if (throttleBuilder_ == null) { + throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + } else { + throttleBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.userName_ = userName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.userGroup_ = userGroup_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.namespace_ = namespace_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.removeAll_ = removeAll_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.bypassGlobals_ = bypassGlobals_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + if (throttleBuilder_ == null) { + result.throttle_ = throttle_; + } else { + result.throttle_ = throttleBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance()) return this; + if (other.hasUserName()) { + bitField0_ |= 0x00000001; + userName_ = other.userName_; + onChanged(); + } + if (other.hasUserGroup()) { + bitField0_ |= 0x00000002; + userGroup_ = other.userGroup_; + onChanged(); + } + if (other.hasNamespace()) { + bitField0_ |= 0x00000004; + namespace_ = other.namespace_; + onChanged(); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasRemoveAll()) { + setRemoveAll(other.getRemoveAll()); + } + if (other.hasBypassGlobals()) { + setBypassGlobals(other.getBypassGlobals()); + } + if (other.hasThrottle()) { + mergeThrottle(other.getThrottle()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + + return false; + } + } + if (hasThrottle()) { + if (!getThrottle().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; } + private int bitField0_; - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.Builder.class); + // optional string user_name = 1; + private java.lang.Object userName_ = ""; + /** + * optional string user_name = 1; + */ + public boolean hasUserName() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); + /** + * optional string user_name = 1; + */ + public java.lang.String getUserName() { + java.lang.Object ref = userName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + userName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string user_name = 1; + */ + public com.google.protobuf.ByteString + getUserNameBytes() { + java.lang.Object ref = userName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * 
optional string user_name = 1; + */ + public Builder setUserName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + userName_ = value; + onChanged(); + return this; + } + /** + * optional string user_name = 1; + */ + public Builder clearUserName() { + bitField0_ = (bitField0_ & ~0x00000001); + userName_ = getDefaultInstance().getUserName(); + onChanged(); + return this; + } + /** + * optional string user_name = 1; + */ + public Builder setUserNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + userName_ = value; + onChanged(); + return this; } - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); + // optional string user_group = 2; + private java.lang.Object userGroup_ = ""; + /** + * optional string user_group = 2; + */ + public boolean hasUserGroup() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getProcedureFieldBuilder(); + /** + * optional string user_group = 2; + */ + public java.lang.String getUserGroup() { + java.lang.Object ref = userGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + userGroup_ = s; + return s; + } else { + return (java.lang.String) ref; } } - private static Builder create() { - return new Builder(); + /** + * optional string user_group = 2; + */ + public com.google.protobuf.ByteString + getUserGroupBytes() { + java.lang.Object ref = userGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string user_group = 2; + */ + public Builder setUserGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + userGroup_ = value; + onChanged(); + return this; + } + /** + * optional string user_group = 2; + */ + public Builder clearUserGroup() { + bitField0_ = (bitField0_ & ~0x00000002); + userGroup_ = getDefaultInstance().getUserGroup(); + onChanged(); + return this; + } + /** + * optional string user_group = 2; + */ + public Builder setUserGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + userGroup_ = value; + onChanged(); + return this; } - public Builder clear() { - super.clear(); - if (procedureBuilder_ == null) { - procedure_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); + // optional string namespace = 3; + private java.lang.Object namespace_ = ""; + /** + * optional string namespace = 3; + */ + public boolean hasNamespace() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string namespace = 3; + */ + public java.lang.String getNamespace() { + java.lang.Object ref = namespace_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + namespace_ = s; + return s; } else { - procedureBuilder_.clear(); + return (java.lang.String) ref; + } + } + /** + * optional string namespace = 3; + */ + public 
com.google.protobuf.ByteString + getNamespaceBytes() { + java.lang.Object ref = namespace_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + namespace_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } + } + /** + * optional string namespace = 3; + */ + public Builder setNamespace( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + namespace_ = value; + onChanged(); return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); + /** + * optional string namespace = 3; + */ + public Builder clearNamespace() { + bitField0_ = (bitField0_ & ~0x00000004); + namespace_ = getDefaultInstance().getNamespace(); + onChanged(); + return this; } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListProceduresResponse_descriptor; + /** + * optional string namespace = 3; + */ + public Builder setNamespaceBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + namespace_ = value; + onChanged(); + return this; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); + // optional .hbase.pb.TableName table_name = 4; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000008) == 0x00000008); } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); } - return result; } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse(this); - int from_bitField0_ = bitField0_; - if (procedureBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - procedure_ = java.util.Collections.unmodifiableList(procedure_); - bitField0_ = (bitField0_ & ~0x00000001); + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new 
NullPointerException(); } - result.procedure_ = procedure_; + tableName_ = value; + onChanged(); } else { - result.procedure_ = procedureBuilder_.build(); + tableNameBuilder_.setMessage(value); } - onBuilt(); - return result; + bitField0_ |= 0x00000008; + return this; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse)other); + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); } else { - super.mergeFrom(other); - return this; + tableNameBuilder_.setMessage(builderForValue.build()); } + bitField0_ |= 0x00000008; + return this; } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()) return this; - if (procedureBuilder_ == null) { - if (!other.procedure_.isEmpty()) { - if (procedure_.isEmpty()) { - procedure_ = other.procedure_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureProcedureIsMutable(); - procedure_.addAll(other.procedure_); - } - onChanged(); + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; } + onChanged(); } else { - if (!other.procedure_.isEmpty()) { - if (procedureBuilder_.isEmpty()) { - procedureBuilder_.dispose(); - procedureBuilder_ = null; - procedure_ = other.procedure_; - bitField0_ = (bitField0_ & ~0x00000001); - procedureBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getProcedureFieldBuilder() : null; - } else { - procedureBuilder_.addAllMessages(other.procedure_); - } - } + tableNameBuilder_.mergeFrom(value); } - this.mergeUnknownFields(other.getUnknownFields()); + bitField0_ |= 0x00000008; return this; } - - public final boolean isInitialized() { - for (int i = 0; i < getProcedureCount(); i++) { - if (!getProcedure(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000008); return this; } - private int bitField0_; - - // repeated .hbase.pb.Procedure procedure = 1; - private java.util.List procedure_ = - java.util.Collections.emptyList(); - private void ensureProcedureIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - procedure_ = new java.util.ArrayList(procedure_); - bitField0_ |= 0x00000001; - } + /** + * optional .hbase.pb.TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> procedureBuilder_; - /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.TableName table_name = 4; */ - public java.util.List getProcedureList() { - if (procedureBuilder_ == null) { - return java.util.Collections.unmodifiableList(procedure_); + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); } else { - return procedureBuilder_.getMessageList(); + return tableName_; } } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.TableName table_name = 4; */ - public int getProcedureCount() { - if (procedureBuilder_ == null) { - return procedure_.size(); - } else { - return procedureBuilder_.getCount(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; } + return tableNameBuilder_; } + + // optional bool remove_all = 5; + private boolean removeAll_ ; /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional bool remove_all = 5; */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { - if (procedureBuilder_ == null) { - return procedure_.get(index); - } else { - return procedureBuilder_.getMessage(index); - } + public boolean hasRemoveAll() { + return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional bool remove_all = 5; */ - public Builder setProcedure( - int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { - if (procedureBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureProcedureIsMutable(); - procedure_.set(index, value); - onChanged(); - } else { - procedureBuilder_.setMessage(index, value); - } + public boolean getRemoveAll() { + return removeAll_; + } + /** + * optional bool remove_all = 5; + */ + public Builder setRemoveAll(boolean value) { + bitField0_ |= 0x00000010; + removeAll_ = value; + onChanged(); return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional bool remove_all = 5; */ - public Builder setProcedure( - int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { - if (procedureBuilder_ == null) { - ensureProcedureIsMutable(); - procedure_.set(index, builderForValue.build()); - onChanged(); - } else { - procedureBuilder_.setMessage(index, builderForValue.build()); - } + public Builder clearRemoveAll() { + bitField0_ = (bitField0_ & ~0x00000010); + removeAll_ = false; + onChanged(); + return this; + } + + // optional bool bypass_globals = 6; + private boolean bypassGlobals_ ; + /** + * optional bool bypass_globals = 6; + */ + public boolean hasBypassGlobals() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool bypass_globals = 6; + */ + public boolean getBypassGlobals() { + return bypassGlobals_; + } + /** + * optional bool bypass_globals = 6; + */ + public Builder setBypassGlobals(boolean value) { + bitField0_ |= 0x00000020; + bypassGlobals_ = value; + onChanged(); return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional bool bypass_globals = 6; */ - public Builder addProcedure(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { - if (procedureBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureProcedureIsMutable(); - procedure_.add(value); - onChanged(); - } else { - procedureBuilder_.addMessage(value); - } + public Builder clearBypassGlobals() { + bitField0_ = (bitField0_ & ~0x00000020); + bypassGlobals_ = false; + onChanged(); return this; } + + // optional .hbase.pb.ThrottleRequest throttle = 7; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder> throttleBuilder_; /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public Builder addProcedure( - int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { - if (procedureBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureProcedureIsMutable(); - procedure_.add(index, value); - onChanged(); - } else { - procedureBuilder_.addMessage(index, value); - } - return this; + public boolean hasThrottle() { + return ((bitField0_ & 0x00000040) == 0x00000040); } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public Builder addProcedure( - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { - if (procedureBuilder_ == null) { - ensureProcedureIsMutable(); - procedure_.add(builderForValue.build()); - onChanged(); + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle() { + if (throttleBuilder_ == null) { + return throttle_; } else { - procedureBuilder_.addMessage(builderForValue.build()); + return throttleBuilder_.getMessage(); } - return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public Builder addProcedure( - int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { - if (procedureBuilder_ == null) { - ensureProcedureIsMutable(); - procedure_.add(index, builderForValue.build()); + public Builder setThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest value) { + if (throttleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + throttle_ = value; onChanged(); } else { - procedureBuilder_.addMessage(index, builderForValue.build()); + throttleBuilder_.setMessage(value); } + bitField0_ |= 0x00000040; return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public Builder addAllProcedure( - java.lang.Iterable values) { - if (procedureBuilder_ == null) { - ensureProcedureIsMutable(); - super.addAll(values, procedure_); + public Builder setThrottle( + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder builderForValue) { + if (throttleBuilder_ == null) { + throttle_ = builderForValue.build(); onChanged(); } else { - procedureBuilder_.addAllMessages(values); + throttleBuilder_.setMessage(builderForValue.build()); } + bitField0_ |= 0x00000040; return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public Builder clearProcedure() { - if (procedureBuilder_ == null) { - procedure_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); + public Builder mergeThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest value) { + if (throttleBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040) && + throttle_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance()) { + throttle_ = + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.newBuilder(throttle_).mergeFrom(value).buildPartial(); + } else { + throttle_ = value; + } onChanged(); } else { - procedureBuilder_.clear(); + throttleBuilder_.mergeFrom(value); } + 
bitField0_ |= 0x00000040; return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public Builder removeProcedure(int index) { - if (procedureBuilder_ == null) { - ensureProcedureIsMutable(); - procedure_.remove(index); + public Builder clearThrottle() { + if (throttleBuilder_ == null) { + throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); onChanged(); } else { - procedureBuilder_.remove(index); + throttleBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000040); return this; } /** - * repeated .hbase.pb.Procedure procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder getProcedureBuilder( - int index) { - return getProcedureFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( - int index) { - if (procedureBuilder_ == null) { - return procedure_.get(index); } else { - return procedureBuilder_.getMessageOrBuilder(index); - } + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder getThrottleBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getThrottleFieldBuilder().getBuilder(); } /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public java.util.List - getProcedureOrBuilderList() { - if (procedureBuilder_ != null) { - return procedureBuilder_.getMessageOrBuilderList(); + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder() { + if (throttleBuilder_ != null) { + return throttleBuilder_.getMessageOrBuilder(); } else { - return java.util.Collections.unmodifiableList(procedure_); + return throttle_; } } /** - * repeated .hbase.pb.Procedure procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder() { - return getProcedureFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); - } - /** - * repeated .hbase.pb.Procedure procedure = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder( - int index) { - return getProcedureFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); - } - /** - * repeated .hbase.pb.Procedure procedure = 1; + * optional .hbase.pb.ThrottleRequest throttle = 7; */ - public java.util.List - getProcedureBuilderList() { - return getProcedureFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> - getProcedureFieldBuilder() { - if (procedureBuilder_ == null) { - procedureBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder>( - procedure_, - ((bitField0_ & 0x00000001) == 0x00000001), - 
getParentForChildren(), - isClean()); - procedure_ = null; - } - return procedureBuilder_; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.ListProceduresResponse) - } - - static { - defaultInstance = new ListProceduresResponse(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.ListProceduresResponse) - } - - public interface SetQuotaRequestOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional string user_name = 1; - /** - * optional string user_name = 1; - */ - boolean hasUserName(); - /** - * optional string user_name = 1; - */ - java.lang.String getUserName(); - /** - * optional string user_name = 1; - */ - com.google.protobuf.ByteString - getUserNameBytes(); - - // optional string user_group = 2; - /** - * optional string user_group = 2; - */ - boolean hasUserGroup(); - /** - * optional string user_group = 2; - */ - java.lang.String getUserGroup(); - /** - * optional string user_group = 2; - */ - com.google.protobuf.ByteString - getUserGroupBytes(); - - // optional string namespace = 3; - /** - * optional string namespace = 3; - */ - boolean hasNamespace(); - /** - * optional string namespace = 3; - */ - java.lang.String getNamespace(); - /** - * optional string namespace = 3; - */ - com.google.protobuf.ByteString - getNamespaceBytes(); - - // optional .hbase.pb.TableName table_name = 4; - /** - * optional .hbase.pb.TableName table_name = 4; - */ - boolean hasTableName(); - /** - * optional .hbase.pb.TableName table_name = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); - /** - * optional .hbase.pb.TableName table_name = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder> + getThrottleFieldBuilder() { + if (throttleBuilder_ == null) { + throttleBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder>( + throttle_, + getParentForChildren(), + isClean()); + throttle_ = null; + } + return throttleBuilder_; + } - // optional bool remove_all = 5; - /** - * optional bool remove_all = 5; - */ - boolean hasRemoveAll(); - /** - * optional bool remove_all = 5; - */ - boolean getRemoveAll(); + // @@protoc_insertion_point(builder_scope:hbase.pb.SetQuotaRequest) + } - // optional bool bypass_globals = 6; - /** - * optional bool bypass_globals = 6; - */ - boolean hasBypassGlobals(); - /** - * optional bool bypass_globals = 6; - */ - boolean getBypassGlobals(); + static { + defaultInstance = new SetQuotaRequest(true); + defaultInstance.initFields(); + } - // optional .hbase.pb.ThrottleRequest throttle = 7; - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - boolean hasThrottle(); - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle(); - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder(); + // 
@@protoc_insertion_point(class_scope:hbase.pb.SetQuotaRequest) + } + + public interface SetQuotaResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { } /** - * Protobuf type {@code hbase.pb.SetQuotaRequest} + * Protobuf type {@code hbase.pb.SetQuotaResponse} */ - public static final class SetQuotaRequest extends + public static final class SetQuotaResponse extends com.google.protobuf.GeneratedMessage - implements SetQuotaRequestOrBuilder { - // Use SetQuotaRequest.newBuilder() to construct. - private SetQuotaRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements SetQuotaResponseOrBuilder { + // Use SetQuotaResponse.newBuilder() to construct. + private SetQuotaResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SetQuotaRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private SetQuotaResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SetQuotaRequest defaultInstance; - public static SetQuotaRequest getDefaultInstance() { + private static final SetQuotaResponse defaultInstance; + public static SetQuotaResponse getDefaultInstance() { return defaultInstance; } - public SetQuotaRequest getDefaultInstanceForType() { + public SetQuotaResponse getDefaultInstanceForType() { return defaultInstance; } @@ -58432,12 +58662,11 @@ public SetQuotaRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private SetQuotaRequest( + private SetQuotaResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -58455,57 +58684,6 @@ private SetQuotaRequest( } break; } - case 10: { - bitField0_ |= 0x00000001; - userName_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - userGroup_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - namespace_ = input.readBytes(); - break; - } - case 34: { - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; - if (((bitField0_ & 0x00000008) == 0x00000008)) { - subBuilder = tableName_.toBuilder(); - } - tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(tableName_); - tableName_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000008; - break; - } - case 40: { - bitField0_ |= 0x00000010; - removeAll_ = input.readBool(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - bypassGlobals_ = input.readBool(); - break; - } - case 58: { - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder subBuilder = null; - if (((bitField0_ & 0x00000040) == 0x00000040)) { - subBuilder = throttle_.toBuilder(); - } - throttle_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(throttle_); - throttle_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000040; - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -58518,264 +58696,437 @@ private 
SetQuotaRequest( makeExtensionsImmutable(); } } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SetQuotaResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SetQuotaResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.Builder.class); + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse prototype) { + return newBuilder().mergeFrom(prototype); } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SetQuotaRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SetQuotaRequest(input, extensionRegistry); - } - }; + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional string user_name = 1; - public static final int 
USER_NAME_FIELD_NUMBER = 1; - private java.lang.Object userName_; - /** - * optional string user_name = 1; - */ - public boolean hasUserName() { - return ((bitField0_ & 0x00000001) == 0x00000001); + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; } /** - * optional string user_name = 1; + * Protobuf type {@code hbase.pb.SetQuotaResponse} */ - public java.lang.String getUserName() { - java.lang.Object ref = userName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - userName_ = s; + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } - return s; } - } - /** - * optional string user_name = 1; - */ - public com.google.protobuf.ByteString - getUserNameBytes() { - java.lang.Object ref = userName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - userName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + private static Builder create() { + return new Builder(); } - } - // optional string user_group = 2; - public static final int USER_GROUP_FIELD_NUMBER = 2; - private java.lang.Object userGroup_; - /** - * optional string user_group = 2; - */ - public boolean hasUserGroup() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string user_group = 2; - */ - public java.lang.String getUserGroup() { - java.lang.Object ref = userGroup_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - userGroup_ = s; + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); } - return s; + return result; } - } - /** - * optional string user_group = 2; - */ - public com.google.protobuf.ByteString - getUserGroupBytes() { - java.lang.Object ref = userGroup_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - userGroup_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse(this); + onBuilt(); + return result; } - } - // optional string namespace = 3; - public static final int NAMESPACE_FIELD_NUMBER = 3; - private java.lang.Object namespace_; - /** - * optional string namespace = 3; - */ - public boolean hasNamespace() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string namespace = 3; - */ - public java.lang.String getNamespace() { - java.lang.Object ref = namespace_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - namespace_ = s; + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse)other); + } else { + super.mergeFrom(other); + return this; } - return s; } - } - /** - * optional string namespace = 3; - */ - public com.google.protobuf.ByteString - getNamespaceBytes() { - java.lang.Object ref = namespace_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - namespace_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + 
mergeFrom(parsedMessage); + } + } + return this; } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SetQuotaResponse) + } + + static { + defaultInstance = new SetQuotaResponse(true); + defaultInstance.initFields(); } - // optional .hbase.pb.TableName table_name = 4; - public static final int TABLE_NAME_FIELD_NUMBER = 4; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; + // @@protoc_insertion_point(class_scope:hbase.pb.SetQuotaResponse) + } + + public interface MajorCompactionTimestampRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.TableName table_name = 1; /** - * optional .hbase.pb.TableName table_name = 4; + * required .hbase.pb.TableName table_name = 1; */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } + boolean hasTableName(); /** - * optional .hbase.pb.TableName table_name = 4; + * required .hbase.pb.TableName table_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - return tableName_; - } + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); /** - * optional .hbase.pb.TableName table_name = 4; + * required .hbase.pb.TableName table_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - return tableName_; + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.MajorCompactionTimestampRequest} + */ + public static final class MajorCompactionTimestampRequest extends + com.google.protobuf.GeneratedMessage + implements MajorCompactionTimestampRequestOrBuilder { + // Use MajorCompactionTimestampRequest.newBuilder() to construct. 
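[Editor's note -- illustration only, not part of the generated diff.] The hunks above restore the pre-HBASE-7767 wire shape of hbase.pb.SetQuotaRequest and hbase.pb.SetQuotaResponse. Every SetQuotaRequest field is optional, so isInitialized() recurses into table_name and throttle only when their has-bits are set. Below is a minimal sketch of round-tripping the restored message through the protobuf 2.5 builder API; the namespace/qualifier bytes fields on TableProtos.TableName are assumed to match the usual hbase.pb.TableName definition, while the other class and accessor names are taken directly from the hunks above.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
    import org.apache.hadoop.hbase.protobuf.generated.TableProtos;

    public class SetQuotaRequestRoundTrip {
      public static void main(String[] args) throws Exception {
        SetQuotaRequest req = SetQuotaRequest.newBuilder()
            .setUserName("bob")                          // optional string user_name = 1
            .setTableName(TableProtos.TableName.newBuilder()
                .setNamespace(ByteString.copyFromUtf8("default"))
                .setQualifier(ByteString.copyFromUtf8("t1")))
            .setRemoveAll(true)                          // optional bool remove_all = 5
            .build();
        byte[] wire = req.toByteArray();                 // serialize with the restored tag layout
        SetQuotaRequest copy = SetQuotaRequest.parseFrom(wire);
        System.out.println(copy.hasThrottle());          // false: unset optionals keep their has-bit clear
      }
    }

The same optional-field pattern is why the builder's isInitialized() above returns true for an empty request: there is nothing required to enforce.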
+ private MajorCompactionTimestampRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); } + private MajorCompactionTimestampRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - // optional bool remove_all = 5; - public static final int REMOVE_ALL_FIELD_NUMBER = 5; - private boolean removeAll_; - /** - * optional bool remove_all = 5; - */ - public boolean hasRemoveAll() { - return ((bitField0_ & 0x00000010) == 0x00000010); + private static final MajorCompactionTimestampRequest defaultInstance; + public static MajorCompactionTimestampRequest getDefaultInstance() { + return defaultInstance; } - /** - * optional bool remove_all = 5; - */ - public boolean getRemoveAll() { - return removeAll_; + + public MajorCompactionTimestampRequest getDefaultInstanceForType() { + return defaultInstance; } - // optional bool bypass_globals = 6; - public static final int BYPASS_GLOBALS_FIELD_NUMBER = 6; - private boolean bypassGlobals_; - /** - * optional bool bypass_globals = 6; - */ - public boolean hasBypassGlobals() { - return ((bitField0_ & 0x00000020) == 0x00000020); + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; } - /** - * optional bool bypass_globals = 6; - */ - public boolean getBypassGlobals() { - return bypassGlobals_; + private MajorCompactionTimestampRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; } - // optional .hbase.pb.ThrottleRequest throttle = 7; - public static final int THROTTLE_FIELD_NUMBER = 7; - private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest throttle_; + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MajorCompactionTimestampRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MajorCompactionTimestampRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * required .hbase.pb.TableName table_name = 1; */ - public boolean hasThrottle() { - return ((bitField0_ & 0x00000040) == 0x00000040); + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * required .hbase.pb.TableName table_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle() { - return throttle_; + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + return tableName_; } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * required .hbase.pb.TableName table_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder() { - return throttle_; + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; } private void initFields() { - userName_ = ""; - userGroup_ = ""; - namespace_ = ""; tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - removeAll_ = false; - bypassGlobals_ = false; - throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (hasTableName()) { - if (!getTableName().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; } - if (hasThrottle()) { - if (!getThrottle().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -58785,25 +59136,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getUserNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getUserGroupBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getNamespaceBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - 
output.writeMessage(4, tableName_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBool(5, removeAll_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeBool(6, bypassGlobals_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeMessage(7, throttle_); + output.writeMessage(1, tableName_); } getUnknownFields().writeTo(output); } @@ -58816,31 +59149,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getUserNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getUserGroupBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getNamespaceBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, tableName_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, removeAll_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(6, bypassGlobals_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, throttle_); + .computeMessageSize(1, tableName_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -58855,51 +59164,21 @@ protected java.lang.Object writeReplace() } @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) obj; - - boolean result = true; - result = result && (hasUserName() == other.hasUserName()); - if (hasUserName()) { - result = result && getUserName() - .equals(other.getUserName()); - } - result = result && (hasUserGroup() == other.hasUserGroup()); - if (hasUserGroup()) { - result = result && getUserGroup() - .equals(other.getUserGroup()); + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; } - result = result && (hasNamespace() == other.hasNamespace()); - if (hasNamespace()) { - result = result && getNamespace() - .equals(other.getNamespace()); + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)) { + return super.equals(obj); } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) obj; + + boolean result = true; result = result && (hasTableName() == other.hasTableName()); if (hasTableName()) { result = result && getTableName() .equals(other.getTableName()); } - result = result && (hasRemoveAll() == other.hasRemoveAll()); - if (hasRemoveAll()) { - result = result && (getRemoveAll() - == other.getRemoveAll()); - } - result = result && (hasBypassGlobals() == other.hasBypassGlobals()); - if (hasBypassGlobals()) { - result = result && (getBypassGlobals() - == other.getBypassGlobals()); - } - result = result && (hasThrottle() == other.hasThrottle()); - if 
(hasThrottle()) { - result = result && getThrottle() - .equals(other.getThrottle()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -58913,86 +59192,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasUserName()) { - hash = (37 * hash) + USER_NAME_FIELD_NUMBER; - hash = (53 * hash) + getUserName().hashCode(); - } - if (hasUserGroup()) { - hash = (37 * hash) + USER_GROUP_FIELD_NUMBER; - hash = (53 * hash) + getUserGroup().hashCode(); - } - if (hasNamespace()) { - hash = (37 * hash) + NAMESPACE_FIELD_NUMBER; - hash = (53 * hash) + getNamespace().hashCode(); - } if (hasTableName()) { hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; hash = (53 * hash) + getTableName().hashCode(); } - if (hasRemoveAll()) { - hash = (37 * hash) + REMOVE_ALL_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getRemoveAll()); - } - if (hasBypassGlobals()) { - hash = (37 * hash) + BYPASS_GLOBALS_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getBypassGlobals()); - } - if (hasThrottle()) { - hash = (37 * hash) + THROTTLE_FIELD_NUMBER; - hash = (53 * hash) + getThrottle().hashCode(); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -59001,7 +59256,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRe public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -59013,447 +59268,152 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.SetQuotaRequest} + * Protobuf type {@code hbase.pb.MajorCompactionTimestampRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableNameFieldBuilder(); - getThrottleFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - userName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - userGroup_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - namespace_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - } else { - tableNameBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - removeAll_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - bypassGlobals_ = false; - bitField0_ = (bitField0_ & ~0x00000020); - if (throttleBuilder_ == null) { - throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); - } else { - throttleBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000040); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaRequest_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.userName_ = userName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.userGroup_ = userGroup_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.namespace_ = namespace_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - if (tableNameBuilder_ == null) { - result.tableName_ = tableName_; - } else { - result.tableName_ = tableNameBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.removeAll_ = removeAll_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.bypassGlobals_ = bypassGlobals_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - if (throttleBuilder_ == null) { - result.throttle_ = throttle_; - } else { - result.throttle_ = throttleBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) { - return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance()) return this; - if (other.hasUserName()) { - bitField0_ |= 0x00000001; - userName_ = other.userName_; - onChanged(); - } - if (other.hasUserGroup()) { - bitField0_ |= 0x00000002; - userGroup_ = other.userGroup_; - onChanged(); - } - if (other.hasNamespace()) { - bitField0_ |= 0x00000004; - namespace_ = other.namespace_; - onChanged(); - } - if (other.hasTableName()) { - mergeTableName(other.getTableName()); - } - if (other.hasRemoveAll()) { - setRemoveAll(other.getRemoveAll()); - } - if (other.hasBypassGlobals()) { - setBypassGlobals(other.getBypassGlobals()); - } - if (other.hasThrottle()) { - mergeThrottle(other.getThrottle()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (hasTableName()) { - if (!getTableName().isInitialized()) { - - return false; - } - } - if (hasThrottle()) { - if (!getThrottle().isInitialized()) { - - return false; - } - } - return true; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; } - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class); } - private int bitField0_; - // optional string user_name = 1; - private java.lang.Object userName_ = ""; - /** - * optional string user_name = 1; - */ - public boolean hasUserName() { - return ((bitField0_ & 0x00000001) == 0x00000001); + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); } - /** - * optional string user_name = 1; - */ - public java.lang.String getUserName() { - java.lang.Object ref = userName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - userName_ = s; - return s; - } else { - return (java.lang.String) ref; - } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); } - /** - * optional string user_name = 1; - */ - public com.google.protobuf.ByteString - 
getUserNameBytes() { - java.lang.Object ref = userName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - userName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); } } - /** - * optional string user_name = 1; - */ - public Builder setUserName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - userName_ = value; - onChanged(); - return this; + private static Builder create() { + return new Builder(); } - /** - * optional string user_name = 1; - */ - public Builder clearUserName() { + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - userName_ = getDefaultInstance().getUserName(); - onChanged(); return this; } - /** - * optional string user_name = 1; - */ - public Builder setUserNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - userName_ = value; - onChanged(); - return this; + + public Builder clone() { + return create().mergeFrom(buildPartial()); } - // optional string user_group = 2; - private java.lang.Object userGroup_ = ""; - /** - * optional string user_group = 2; - */ - public boolean hasUserGroup() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; } - /** - * optional string user_group = 2; - */ - public java.lang.String getUserGroup() { - java.lang.Object ref = userGroup_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - userGroup_ = s; - return s; - } else { - return (java.lang.String) ref; - } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); } - /** - * optional string user_group = 2; - */ - public com.google.protobuf.ByteString - getUserGroupBytes() { - java.lang.Object ref = userGroup_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - userGroup_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); } - } - /** - * optional string user_group = 2; - */ - public Builder setUserGroup( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - userGroup_ = value; - onChanged(); - return this; - } - /** - * optional string user_group = 2; - */ - public Builder clearUserGroup() { 
- bitField0_ = (bitField0_ & ~0x00000002); - userGroup_ = getDefaultInstance().getUserGroup(); - onChanged(); - return this; - } - /** - * optional string user_group = 2; - */ - public Builder setUserGroupBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - userGroup_ = value; - onChanged(); - return this; + return result; } - // optional string namespace = 3; - private java.lang.Object namespace_ = ""; - /** - * optional string namespace = 3; - */ - public boolean hasNamespace() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string namespace = 3; - */ - public java.lang.String getNamespace() { - java.lang.Object ref = namespace_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - namespace_ = s; - return s; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; } else { - return (java.lang.String) ref; + result.tableName_ = tableNameBuilder_.build(); } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - /** - * optional string namespace = 3; - */ - public com.google.protobuf.ByteString - getNamespaceBytes() { - java.lang.Object ref = namespace_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - namespace_ = b; - return b; + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)other); } else { - return (com.google.protobuf.ByteString) ref; + super.mergeFrom(other); + return this; } } - /** - * optional string namespace = 3; - */ - public Builder setNamespace( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - namespace_ = value; - onChanged(); + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + this.mergeUnknownFields(other.getUnknownFields()); return this; } - /** - * optional string namespace = 3; - */ - public Builder clearNamespace() { - bitField0_ = (bitField0_ & ~0x00000004); - namespace_ = getDefaultInstance().getNamespace(); - onChanged(); - return this; + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; } - /** - * optional string namespace = 3; - */ - public Builder setNamespaceBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; 
- namespace_ = value; - onChanged(); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } return this; } + private int bitField0_; - // optional .hbase.pb.TableName table_name = 4; + // required .hbase.pb.TableName table_name = 1; private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; /** - * optional .hbase.pb.TableName table_name = 4; + * required .hbase.pb.TableName table_name = 1; */ public boolean hasTableName() { - return ((bitField0_ & 0x00000008) == 0x00000008); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .hbase.pb.TableName table_name = 4; + * required .hbase.pb.TableName table_name = 1; */ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { if (tableNameBuilder_ == null) { @@ -59463,7 +59423,7 @@ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable } } /** - * optional .hbase.pb.TableName table_name = 4; + * required .hbase.pb.TableName table_name = 1; */ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { if (tableNameBuilder_ == null) { @@ -59475,11 +59435,11 @@ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProt } else { tableNameBuilder_.setMessage(value); } - bitField0_ |= 0x00000008; + bitField0_ |= 0x00000001; return this; } /** - * optional .hbase.pb.TableName table_name = 4; + * required .hbase.pb.TableName table_name = 1; */ public Builder setTableName( org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { @@ -59489,15 +59449,15 @@ public Builder setTableName( } else { tableNameBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000008; + bitField0_ |= 0x00000001; return this; } /** - * optional .hbase.pb.TableName table_name = 4; + * required .hbase.pb.TableName table_name = 1; */ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { if (tableNameBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008) && + if (((bitField0_ & 0x00000001) == 0x00000001) && tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); @@ -59506,275 +59466,106 @@ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TablePr } onChanged(); } else { - tableNameBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000008; - return this; - } - 
/** - * optional .hbase.pb.TableName table_name = 4; - */ - public Builder clearTableName() { - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - onChanged(); - } else { - tableNameBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { - bitField0_ |= 0x00000008; - onChanged(); - return getTableNameFieldBuilder().getBuilder(); - } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - if (tableNameBuilder_ != null) { - return tableNameBuilder_.getMessageOrBuilder(); - } else { - return tableName_; - } - } - /** - * optional .hbase.pb.TableName table_name = 4; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> - getTableNameFieldBuilder() { - if (tableNameBuilder_ == null) { - tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( - tableName_, - getParentForChildren(), - isClean()); - tableName_ = null; - } - return tableNameBuilder_; - } - - // optional bool remove_all = 5; - private boolean removeAll_ ; - /** - * optional bool remove_all = 5; - */ - public boolean hasRemoveAll() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool remove_all = 5; - */ - public boolean getRemoveAll() { - return removeAll_; - } - /** - * optional bool remove_all = 5; - */ - public Builder setRemoveAll(boolean value) { - bitField0_ |= 0x00000010; - removeAll_ = value; - onChanged(); - return this; - } - /** - * optional bool remove_all = 5; - */ - public Builder clearRemoveAll() { - bitField0_ = (bitField0_ & ~0x00000010); - removeAll_ = false; - onChanged(); - return this; - } - - // optional bool bypass_globals = 6; - private boolean bypassGlobals_ ; - /** - * optional bool bypass_globals = 6; - */ - public boolean hasBypassGlobals() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional bool bypass_globals = 6; - */ - public boolean getBypassGlobals() { - return bypassGlobals_; - } - /** - * optional bool bypass_globals = 6; - */ - public Builder setBypassGlobals(boolean value) { - bitField0_ |= 0x00000020; - bypassGlobals_ = value; - onChanged(); - return this; - } - /** - * optional bool bypass_globals = 6; - */ - public Builder clearBypassGlobals() { - bitField0_ = (bitField0_ & ~0x00000020); - bypassGlobals_ = false; - onChanged(); - return this; - } - - // optional .hbase.pb.ThrottleRequest throttle = 7; - private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder> throttleBuilder_; - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - public boolean hasThrottle() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle() { - if (throttleBuilder_ == null) { - return throttle_; - } else { - return throttleBuilder_.getMessage(); - } - } - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - public Builder setThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest value) { - if (throttleBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - throttle_ = value; - onChanged(); - } else { - throttleBuilder_.setMessage(value); - } - bitField0_ |= 0x00000040; - return this; - } - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - public Builder setThrottle( - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder builderForValue) { - if (throttleBuilder_ == null) { - throttle_ = builderForValue.build(); - onChanged(); - } else { - throttleBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000040; - return this; - } - /** - * optional .hbase.pb.ThrottleRequest throttle = 7; - */ - public Builder mergeThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest value) { - if (throttleBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040) && - throttle_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance()) { - throttle_ = - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.newBuilder(throttle_).mergeFrom(value).buildPartial(); - } else { - throttle_ = value; - } - onChanged(); - } else { - throttleBuilder_.mergeFrom(value); + tableNameBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000001; return this; } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * required .hbase.pb.TableName table_name = 1; */ - public Builder clearThrottle() { - if (throttleBuilder_ == null) { - throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); onChanged(); } else { - throttleBuilder_.clear(); + tableNameBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000001); return this; } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * required .hbase.pb.TableName table_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder getThrottleBuilder() { - bitField0_ |= 0x00000040; + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; onChanged(); - return getThrottleFieldBuilder().getBuilder(); + return getTableNameFieldBuilder().getBuilder(); } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * required .hbase.pb.TableName table_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder() { - if (throttleBuilder_ != null) { - return throttleBuilder_.getMessageOrBuilder(); + public 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); } else { - return throttle_; + return tableName_; } } /** - * optional .hbase.pb.ThrottleRequest throttle = 7; + * required .hbase.pb.TableName table_name = 1; */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder> - getThrottleFieldBuilder() { - if (throttleBuilder_ == null) { - throttleBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder>( - throttle_, + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + tableName_, getParentForChildren(), isClean()); - throttle_ = null; + tableName_ = null; } - return throttleBuilder_; + return tableNameBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.SetQuotaRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampRequest) } static { - defaultInstance = new SetQuotaRequest(true); + defaultInstance = new MajorCompactionTimestampRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SetQuotaRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampRequest) } - public interface SetQuotaResponseOrBuilder + public interface MajorCompactionTimestampForRegionRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.RegionSpecifier region = 1; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + boolean hasRegion(); + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); } /** - * Protobuf type {@code hbase.pb.SetQuotaResponse} + * Protobuf type {@code hbase.pb.MajorCompactionTimestampForRegionRequest} */ - public static final class SetQuotaResponse extends + public static final class MajorCompactionTimestampForRegionRequest extends com.google.protobuf.GeneratedMessage - implements SetQuotaResponseOrBuilder { - // Use SetQuotaResponse.newBuilder() to construct. - private SetQuotaResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements MajorCompactionTimestampForRegionRequestOrBuilder { + // Use MajorCompactionTimestampForRegionRequest.newBuilder() to construct. 
+ private MajorCompactionTimestampForRegionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SetQuotaResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private MajorCompactionTimestampForRegionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SetQuotaResponse defaultInstance; - public static SetQuotaResponse getDefaultInstance() { + private static final MajorCompactionTimestampForRegionRequest defaultInstance; + public static MajorCompactionTimestampForRegionRequest getDefaultInstance() { return defaultInstance; } - public SetQuotaResponse getDefaultInstanceForType() { + public MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { return defaultInstance; } @@ -59784,11 +59575,12 @@ public SetQuotaResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private SetQuotaResponse( + private MajorCompactionTimestampForRegionRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -59806,6 +59598,19 @@ private SetQuotaResponse( } break; } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -59820,38 +59625,70 @@ private SetQuotaResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SetQuotaResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MajorCompactionTimestampForRegionRequest 
parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SetQuotaResponse(input, extensionRegistry); + return new MajorCompactionTimestampForRegionRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } + private int bitField0_; + // required .hbase.pb.RegionSpecifier region = 1; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + return region_; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + return region_; + } + private void initFields() { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasRegion()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -59859,6 +59696,9 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, region_); + } getUnknownFields().writeTo(output); } @@ -59868,6 +59708,10 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, region_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -59885,12 +59729,17 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) obj; boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -59904,58 +59753,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash 
= (53 * hash) + getRegion().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -59964,7 +59817,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRe public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -59976,24 +59829,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.SetQuotaResponse} + * Protobuf type {@code hbase.pb.MajorCompactionTimestampForRegionRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -60005,6 +59858,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionFieldBuilder(); } } private static Builder create() { @@ -60013,6 +59867,12 @@ private static Builder create() { public Builder clear() { super.clear(); + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -60022,43 +59882,65 @@ public Builder clone() { public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetQuotaResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasRegion()) { + + 
return false; + } + if (!getRegion().isInitialized()) { + + return false; + } return true; } @@ -60066,11 +59948,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -60079,54 +59961,168 @@ public Builder mergeFrom( } return this; } + private int bitField0_; - // @@protoc_insertion_point(builder_scope:hbase.pb.SetQuotaResponse) + // required .hbase.pb.RegionSpecifier region = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + if (regionBuilder_ == null) { + return region_; + } else { + return regionBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { + region_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ = value; + } + onChanged(); + } else { + regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + 
*/ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_; + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + region_, + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampForRegionRequest) } static { - defaultInstance = new SetQuotaResponse(true); + defaultInstance = new MajorCompactionTimestampForRegionRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SetQuotaResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampForRegionRequest) } - public interface MajorCompactionTimestampRequestOrBuilder + public interface MajorCompactionTimestampResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.TableName table_name = 1; - /** - * required .hbase.pb.TableName table_name = 1; - */ - boolean hasTableName(); + // required int64 compaction_timestamp = 1; /** - * required .hbase.pb.TableName table_name = 1; + * required int64 compaction_timestamp = 1; */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); + boolean hasCompactionTimestamp(); /** - * required .hbase.pb.TableName table_name = 1; + * required int64 compaction_timestamp = 1; */ - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); + long getCompactionTimestamp(); } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampRequest} + * Protobuf type {@code hbase.pb.MajorCompactionTimestampResponse} */ - public static final class MajorCompactionTimestampRequest extends + public static final class MajorCompactionTimestampResponse extends com.google.protobuf.GeneratedMessage - implements MajorCompactionTimestampRequestOrBuilder { - // Use MajorCompactionTimestampRequest.newBuilder() to construct. - private MajorCompactionTimestampRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements MajorCompactionTimestampResponseOrBuilder { + // Use MajorCompactionTimestampResponse.newBuilder() to construct. 
+ private MajorCompactionTimestampResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private MajorCompactionTimestampRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private MajorCompactionTimestampResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final MajorCompactionTimestampRequest defaultInstance; - public static MajorCompactionTimestampRequest getDefaultInstance() { + private static final MajorCompactionTimestampResponse defaultInstance; + public static MajorCompactionTimestampResponse getDefaultInstance() { return defaultInstance; } - public MajorCompactionTimestampRequest getDefaultInstanceForType() { + public MajorCompactionTimestampResponse getDefaultInstanceForType() { return defaultInstance; } @@ -60136,7 +60132,7 @@ public MajorCompactionTimestampRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private MajorCompactionTimestampRequest( + private MajorCompactionTimestampResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -60159,17 +60155,9 @@ private MajorCompactionTimestampRequest( } break; } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = tableName_.toBuilder(); - } - tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(tableName_); - tableName_ = subBuilder.buildPartial(); - } + case 8: { bitField0_ |= 0x00000001; + compactionTimestamp_ = input.readInt64(); break; } } @@ -60186,67 +60174,57 @@ private MajorCompactionTimestampRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public MajorCompactionTimestampRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MajorCompactionTimestampResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new MajorCompactionTimestampRequest(input, extensionRegistry); + return new MajorCompactionTimestampResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required .hbase.pb.TableName table_name = 1; - public static final int TABLE_NAME_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; + // required int64 compaction_timestamp = 1; + public static final int COMPACTION_TIMESTAMP_FIELD_NUMBER = 1; + private long compactionTimestamp_; /** - * required .hbase.pb.TableName table_name = 1; + * required int64 compaction_timestamp = 1; */ - public boolean hasTableName() { + public boolean hasCompactionTimestamp() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - return tableName_; - } - /** - * required .hbase.pb.TableName table_name = 1; + * required int64 compaction_timestamp = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - return tableName_; + public long getCompactionTimestamp() { + return compactionTimestamp_; } private void initFields() { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + compactionTimestamp_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasTableName()) { - memoizedIsInitialized = 0; - return false; - } - if (!getTableName().isInitialized()) { + if (!hasCompactionTimestamp()) { memoizedIsInitialized = 0; return false; } @@ -60258,7 +60236,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, tableName_); + output.writeInt64(1, compactionTimestamp_); } getUnknownFields().writeTo(output); } @@ -60271,7 +60249,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, tableName_); + .computeInt64Size(1, compactionTimestamp_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -60290,16 +60268,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) obj; boolean result = true; - result = result && (hasTableName() == other.hasTableName()); - if (hasTableName()) { - result = result 
&& getTableName() - .equals(other.getTableName()); + result = result && (hasCompactionTimestamp() == other.hasCompactionTimestamp()); + if (hasCompactionTimestamp()) { + result = result && (getCompactionTimestamp() + == other.getCompactionTimestamp()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -60314,62 +60292,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTableName()) { - hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getTableName().hashCode(); + if (hasCompactionTimestamp()) { + hash = (37 * hash) + COMPACTION_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCompactionTimestamp()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -60378,7 +60356,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -60390,24 +60368,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampRequest} + * Protobuf type {@code hbase.pb.MajorCompactionTimestampResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.newBuilder() + // 
Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -60419,7 +60397,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableNameFieldBuilder(); } } private static Builder create() { @@ -60428,11 +60405,7 @@ private static Builder create() { public Builder clear() { super.clear(); - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - } else { - tableNameBuilder_.clear(); - } + compactionTimestamp_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -60443,62 +60416,54 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (tableNameBuilder_ == null) { - result.tableName_ = tableName_; - } else { - result.tableName_ = tableNameBuilder_.build(); - } + result.compactionTimestamp_ = compactionTimestamp_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)other); + if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance()) return this; - if (other.hasTableName()) { - mergeTableName(other.getTableName()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()) return this; + if (other.hasCompactionTimestamp()) { + setCompactionTimestamp(other.getCompactionTimestamp()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasTableName()) { - - return false; - } - if (!getTableName().isInitialized()) { + if (!hasCompactionTimestamp()) { return false; } @@ -60509,11 +60474,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -60524,170 +60489,72 @@ public Builder mergeFrom( } private int bitField0_; - // required .hbase.pb.TableName table_name = 1; - private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; + // required int64 compaction_timestamp = 1; + private long compactionTimestamp_ ; /** - * required .hbase.pb.TableName table_name = 1; + * required int64 compaction_timestamp = 1; */ - public boolean hasTableName() { + public boolean hasCompactionTimestamp() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { - if (tableNameBuilder_ == null) { - return tableName_; - } else { - return tableNameBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableNameBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - tableName_ = value; - onChanged(); - } else { - 
tableNameBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableName table_name = 1; + * required int64 compaction_timestamp = 1; */ - public Builder setTableName( - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { - if (tableNameBuilder_ == null) { - tableName_ = builderForValue.build(); - onChanged(); - } else { - tableNameBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; + public long getCompactionTimestamp() { + return compactionTimestamp_; } /** - * required .hbase.pb.TableName table_name = 1; + * required int64 compaction_timestamp = 1; */ - public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { - if (tableNameBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { - tableName_ = - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); - } else { - tableName_ = value; - } - onChanged(); - } else { - tableNameBuilder_.mergeFrom(value); - } + public Builder setCompactionTimestamp(long value) { bitField0_ |= 0x00000001; + compactionTimestamp_ = value; + onChanged(); return this; } /** - * required .hbase.pb.TableName table_name = 1; + * required int64 compaction_timestamp = 1; */ - public Builder clearTableName() { - if (tableNameBuilder_ == null) { - tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); - onChanged(); - } else { - tableNameBuilder_.clear(); - } + public Builder clearCompactionTimestamp() { bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { - bitField0_ |= 0x00000001; + compactionTimestamp_ = 0L; onChanged(); - return getTableNameFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { - if (tableNameBuilder_ != null) { - return tableNameBuilder_.getMessageOrBuilder(); - } else { - return tableName_; - } - } - /** - * required .hbase.pb.TableName table_name = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> - getTableNameFieldBuilder() { - if (tableNameBuilder_ == null) { - tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( - tableName_, - getParentForChildren(), - isClean()); - tableName_ = null; - } - return tableNameBuilder_; + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampResponse) } static { - defaultInstance = new MajorCompactionTimestampRequest(true); + defaultInstance = new MajorCompactionTimestampResponse(true); defaultInstance.initFields(); } - // 
@@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampResponse) } - public interface MajorCompactionTimestampForRegionRequestOrBuilder + public interface SecurityCapabilitiesRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - - // required .hbase.pb.RegionSpecifier region = 1; - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - boolean hasRegion(); - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampForRegionRequest} + * Protobuf type {@code hbase.pb.SecurityCapabilitiesRequest} */ - public static final class MajorCompactionTimestampForRegionRequest extends + public static final class SecurityCapabilitiesRequest extends com.google.protobuf.GeneratedMessage - implements MajorCompactionTimestampForRegionRequestOrBuilder { - // Use MajorCompactionTimestampForRegionRequest.newBuilder() to construct. - private MajorCompactionTimestampForRegionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements SecurityCapabilitiesRequestOrBuilder { + // Use SecurityCapabilitiesRequest.newBuilder() to construct. + private SecurityCapabilitiesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private MajorCompactionTimestampForRegionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private SecurityCapabilitiesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final MajorCompactionTimestampForRegionRequest defaultInstance; - public static MajorCompactionTimestampForRegionRequest getDefaultInstance() { + private static final SecurityCapabilitiesRequest defaultInstance; + public static SecurityCapabilitiesRequest getDefaultInstance() { return defaultInstance; } - public MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { + public SecurityCapabilitiesRequest getDefaultInstanceForType() { return defaultInstance; } @@ -60697,12 +60564,11 @@ public MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private MajorCompactionTimestampForRegionRequest( + private SecurityCapabilitiesRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -60720,19 +60586,6 @@ private MajorCompactionTimestampForRegionRequest( } break; } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = region_.toBuilder(); - } - region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(region_); - region_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; 
- } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -60747,70 +60600,38 @@ private MajorCompactionTimestampForRegionRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public MajorCompactionTimestampForRegionRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SecurityCapabilitiesRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new MajorCompactionTimestampForRegionRequest(input, extensionRegistry); + return new SecurityCapabilitiesRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required .hbase.pb.RegionSpecifier region = 1; - public static final int REGION_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_; - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public boolean hasRegion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { - return region_; - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { - return region_; - } - private void initFields() { - region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasRegion()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegion().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -60818,9 +60639,6 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { 
getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, region_); - } getUnknownFields().writeTo(output); } @@ -60830,10 +60648,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, region_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -60851,17 +60665,12 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) obj; boolean result = true; - result = result && (hasRegion() == other.hasRegion()); - if (hasRegion()) { - result = result && getRegion() - .equals(other.getRegion()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -60875,62 +60684,58 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegion()) { - hash = (37 * hash) + REGION_FIELD_NUMBER; - hash = (53 * hash) + getRegion().hashCode(); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest 
parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -60939,7 +60744,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -60951,24 +60756,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampForRegionRequest} + * Protobuf type {@code hbase.pb.SecurityCapabilitiesRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -60980,7 +60785,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegionFieldBuilder(); } } private static Builder create() { @@ -60989,12 +60793,6 @@ private static Builder create() { public Builder clear() { super.clear(); - if (regionBuilder_ == null) { - region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); - } else { - regionBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -61004,247 +60802,111 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (regionBuilder_ == null) { - result.region_ = region_; - } else { - result.region_ = regionBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance()) return this; - if (other.hasRegion()) { - mergeRegion(other.getRegion()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegion()) { - - return false; - } - if (!getRegion().isInitialized()) { - - return false; - } - return true; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; } - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); } - private int bitField0_; - // required .hbase.pb.RegionSpecifier region = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public boolean hasRegion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { - if (regionBuilder_ == null) { - return region_; - } else { - return regionBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - region_ = value; - onChanged(); - } else { - regionBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public Builder setRegion( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { - if (regionBuilder_ == null) { - region_ = builderForValue.build(); - 
onChanged(); - } else { - regionBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { - region_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); - } else { - region_ = value; - } - onChanged(); - } else { - regionBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public Builder clearRegion() { - if (regionBuilder_ == null) { - region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); - onChanged(); - } else { - regionBuilder_.clear(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); } - bitField0_ = (bitField0_ & ~0x00000001); - return this; + return result; } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegionFieldBuilder().getBuilder(); + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest(this); + onBuilt(); + return result; } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { - if (regionBuilder_ != null) { - return regionBuilder_.getMessageOrBuilder(); + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)other); } else { - return region_; + super.mergeFrom(other); + return this; } } - /** - * required .hbase.pb.RegionSpecifier region = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> - getRegionFieldBuilder() { - if (regionBuilder_ == null) { - regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( - region_, - getParentForChildren(), - isClean()); - region_ = null; + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } } - return regionBuilder_; + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampForRegionRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.SecurityCapabilitiesRequest) } static { - defaultInstance = new MajorCompactionTimestampForRegionRequest(true); + defaultInstance = new SecurityCapabilitiesRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampForRegionRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesRequest) } - public interface MajorCompactionTimestampResponseOrBuilder + public interface SecurityCapabilitiesResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required int64 compaction_timestamp = 1; + // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; /** - * required int64 compaction_timestamp = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - boolean hasCompactionTimestamp(); + java.util.List getCapabilitiesList(); /** - * required int64 compaction_timestamp = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - long getCompactionTimestamp(); + int getCapabilitiesCount(); + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index); } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampResponse} + * Protobuf type {@code hbase.pb.SecurityCapabilitiesResponse} */ - public static final class MajorCompactionTimestampResponse extends + public static final class SecurityCapabilitiesResponse extends com.google.protobuf.GeneratedMessage - implements MajorCompactionTimestampResponseOrBuilder { - // Use MajorCompactionTimestampResponse.newBuilder() to construct. - private MajorCompactionTimestampResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements SecurityCapabilitiesResponseOrBuilder { + // Use SecurityCapabilitiesResponse.newBuilder() to construct. 
+ private SecurityCapabilitiesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private MajorCompactionTimestampResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private SecurityCapabilitiesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final MajorCompactionTimestampResponse defaultInstance; - public static MajorCompactionTimestampResponse getDefaultInstance() { + private static final SecurityCapabilitiesResponse defaultInstance; + public static SecurityCapabilitiesResponse getDefaultInstance() { return defaultInstance; } - public MajorCompactionTimestampResponse getDefaultInstanceForType() { + public SecurityCapabilitiesResponse getDefaultInstanceForType() { return defaultInstance; } @@ -61254,7 +60916,7 @@ public MajorCompactionTimestampResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private MajorCompactionTimestampResponse( + private SecurityCapabilitiesResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -61275,81 +60937,222 @@ private MajorCompactionTimestampResponse( extensionRegistry, tag)) { done = true; } - break; - } - case 8: { - bitField0_ |= 0x00000001; - compactionTimestamp_ = input.readInt64(); - break; - } - } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + capabilities_.add(value); + } + break; + } + case 10: { + int length = input.readRawVarint32(); + int oldLimit = input.pushLimit(length); + while(input.getBytesUntilLimit() > 0) { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + capabilities_.add(value); + } + } + input.popLimit(oldLimit); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = java.util.Collections.unmodifiableList(capabilities_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SecurityCapabilitiesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SecurityCapabilitiesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code hbase.pb.SecurityCapabilitiesResponse.Capability} + */ + public enum Capability + implements com.google.protobuf.ProtocolMessageEnum { + /** + * SIMPLE_AUTHENTICATION = 0; + */ + SIMPLE_AUTHENTICATION(0, 0), + /** + * SECURE_AUTHENTICATION = 1; + */ + SECURE_AUTHENTICATION(1, 1), + /** + * AUTHORIZATION = 2; + */ + AUTHORIZATION(2, 2), + /** + * CELL_AUTHORIZATION = 3; + */ + CELL_AUTHORIZATION(3, 3), + /** + * CELL_VISIBILITY = 4; + */ + CELL_VISIBILITY(4, 4), + ; + + /** + * SIMPLE_AUTHENTICATION = 0; + */ + public static final int SIMPLE_AUTHENTICATION_VALUE = 0; + /** + * SECURE_AUTHENTICATION = 1; + */ + public static final int SECURE_AUTHENTICATION_VALUE = 1; + /** + * AUTHORIZATION = 2; + */ + public static final int AUTHORIZATION_VALUE = 2; + /** + * CELL_AUTHORIZATION = 3; + */ + public static final int CELL_AUTHORIZATION_VALUE = 3; + /** + * CELL_VISIBILITY = 4; + */ + public static final int CELL_VISIBILITY_VALUE = 4; + + + public final int getNumber() { return value; } + + public static Capability valueOf(int value) { + switch (value) { + case 0: return SIMPLE_AUTHENTICATION; + case 1: return SECURE_AUTHENTICATION; + case 2: return AUTHORIZATION; + case 3: return CELL_AUTHORIZATION; + case 4: return CELL_VISIBILITY; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Capability findValueByNumber(int number) { + return Capability.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDescriptor().getEnumTypes().get(0); + } + + private static final Capability[] VALUES = values(); + + public static Capability valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException 
e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); + return VALUES[desc.getIndex()]; } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; - } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class); - } + private final int index; + private final int value; - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public MajorCompactionTimestampResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new MajorCompactionTimestampResponse(input, extensionRegistry); + private Capability(int index, int value) { + this.index = index; + this.value = value; } - }; - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; + // @@protoc_insertion_point(enum_scope:hbase.pb.SecurityCapabilitiesResponse.Capability) } - private int bitField0_; - // required int64 compaction_timestamp = 1; - public static final int COMPACTION_TIMESTAMP_FIELD_NUMBER = 1; - private long compactionTimestamp_; + // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + public static final int CAPABILITIES_FIELD_NUMBER = 1; + private java.util.List capabilities_; /** - * required int64 compaction_timestamp = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public boolean hasCompactionTimestamp() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getCapabilitiesList() { + return capabilities_; } /** - * required int64 compaction_timestamp = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public long getCompactionTimestamp() { - return compactionTimestamp_; + public int getCapabilitiesCount() { + return capabilities_.size(); + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { + return capabilities_.get(index); } private void initFields() { - compactionTimestamp_ = 0L; + capabilities_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasCompactionTimestamp()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -61357,8 +61160,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - 
output.writeInt64(1, compactionTimestamp_); + for (int i = 0; i < capabilities_.size(); i++) { + output.writeEnum(1, capabilities_.get(i).getNumber()); } getUnknownFields().writeTo(output); } @@ -61369,9 +61172,14 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, compactionTimestamp_); + { + int dataSize = 0; + for (int i = 0; i < capabilities_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeEnumSizeNoTag(capabilities_.get(i).getNumber()); + } + size += dataSize; + size += 1 * capabilities_.size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -61390,17 +61198,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) obj; boolean result = true; - result = result && (hasCompactionTimestamp() == other.hasCompactionTimestamp()); - if (hasCompactionTimestamp()) { - result = result && (getCompactionTimestamp() - == other.getCompactionTimestamp()); - } + result = result && getCapabilitiesList() + .equals(other.getCapabilitiesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -61414,62 +61219,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasCompactionTimestamp()) { - hash = (37 * hash) + COMPACTION_TIMESTAMP_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCompactionTimestamp()); + if (getCapabilitiesCount() > 0) { + hash = (37 * hash) + CAPABILITIES_FIELD_NUMBER; + hash = (53 * hash) + hashEnumList(getCapabilitiesList()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -61478,7 +61283,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -61490,24 +61295,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.MajorCompactionTimestampResponse} + * Protobuf type {@code hbase.pb.SecurityCapabilitiesResponse} */ public static final class Builder extends 
com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -61527,7 +61332,7 @@ private static Builder create() { public Builder clear() { super.clear(); - compactionTimestamp_ = 0L; + capabilities_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -61538,57 +61343,59 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse buildPartial() { - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = java.util.Collections.unmodifiableList(capabilities_); + bitField0_ = (bitField0_ & ~0x00000001); } - result.compactionTimestamp_ = compactionTimestamp_; - result.bitField0_ = to_bitField0_; + result.capabilities_ = capabilities_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()) return this; - if (other.hasCompactionTimestamp()) { - setCompactionTimestamp(other.getCompactionTimestamp()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()) return this; + if (!other.capabilities_.isEmpty()) { + if (capabilities_.isEmpty()) { + capabilities_ = other.capabilities_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureCapabilitiesIsMutable(); + capabilities_.addAll(other.capabilities_); + } + onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasCompactionTimestamp()) { - - return false; - } return true; } @@ -61596,11 +61403,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -61611,72 +61418,136 @@ public Builder mergeFrom( } private int bitField0_; - // 
required int64 compaction_timestamp = 1; - private long compactionTimestamp_ ; + // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + private java.util.List capabilities_ = + java.util.Collections.emptyList(); + private void ensureCapabilitiesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = new java.util.ArrayList(capabilities_); + bitField0_ |= 0x00000001; + } + } /** - * required int64 compaction_timestamp = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public boolean hasCompactionTimestamp() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getCapabilitiesList() { + return java.util.Collections.unmodifiableList(capabilities_); + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public int getCapabilitiesCount() { + return capabilities_.size(); + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { + return capabilities_.get(index); + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public Builder setCapabilities( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCapabilitiesIsMutable(); + capabilities_.set(index, value); + onChanged(); + return this; } /** - * required int64 compaction_timestamp = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public long getCompactionTimestamp() { - return compactionTimestamp_; + public Builder addCapabilities(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCapabilitiesIsMutable(); + capabilities_.add(value); + onChanged(); + return this; } /** - * required int64 compaction_timestamp = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public Builder setCompactionTimestamp(long value) { - bitField0_ |= 0x00000001; - compactionTimestamp_ = value; + public Builder addAllCapabilities( + java.lang.Iterable values) { + ensureCapabilitiesIsMutable(); + super.addAll(values, capabilities_); onChanged(); return this; } /** - * required int64 compaction_timestamp = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public Builder clearCompactionTimestamp() { + public Builder clearCapabilities() { + capabilities_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); - compactionTimestamp_ = 0L; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.MajorCompactionTimestampResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.SecurityCapabilitiesResponse) } static { - defaultInstance = new MajorCompactionTimestampResponse(true); + defaultInstance = new SecurityCapabilitiesResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.MajorCompactionTimestampResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesResponse) } - public interface SecurityCapabilitiesRequestOrBuilder + public interface ClearDeadServersRequestOrBuilder extends com.google.protobuf.MessageOrBuilder 
{
+
+  // repeated .hbase.pb.ServerName server_name = 1;
+  /**
+   * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+   */
+  java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
+      getServerNameList();
+  /**
+   * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+   */
+  org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index);
+  /**
+   * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+   */
+  int getServerNameCount();
+  /**
+   * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+   */
+  java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+      getServerNameOrBuilderList();
+  /**
+   * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+   */
+  org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
+      int index);
 }
 /**
- * Protobuf type {@code hbase.pb.SecurityCapabilitiesRequest}
+ * Protobuf type {@code hbase.pb.ClearDeadServersRequest}
  */
-public static final class SecurityCapabilitiesRequest extends
+public static final class ClearDeadServersRequest extends
     com.google.protobuf.GeneratedMessage
-    implements SecurityCapabilitiesRequestOrBuilder {
-  // Use SecurityCapabilitiesRequest.newBuilder() to construct.
-  private SecurityCapabilitiesRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+    implements ClearDeadServersRequestOrBuilder {
+  // Use ClearDeadServersRequest.newBuilder() to construct.
+  private ClearDeadServersRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
     super(builder);
     this.unknownFields = builder.getUnknownFields();
   }
-  private SecurityCapabilitiesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+  private ClearDeadServersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-  private static final SecurityCapabilitiesRequest defaultInstance;
-  public static SecurityCapabilitiesRequest getDefaultInstance() {
+  private static final ClearDeadServersRequest defaultInstance;
+  public static ClearDeadServersRequest getDefaultInstance() {
     return defaultInstance;
   }
 
-  public SecurityCapabilitiesRequest getDefaultInstanceForType() {
+  public ClearDeadServersRequest getDefaultInstanceForType() {
     return defaultInstance;
   }
 
@@ -61686,11 +61557,12 @@ public SecurityCapabilitiesRequest getDefaultInstanceForType() {
       getUnknownFields() {
     return this.unknownFields;
   }
-  private SecurityCapabilitiesRequest(
+  private ClearDeadServersRequest(
       com.google.protobuf.CodedInputStream input,
       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
       throws com.google.protobuf.InvalidProtocolBufferException {
     initFields();
+    int mutable_bitField0_ = 0;
     com.google.protobuf.UnknownFieldSet.Builder unknownFields =
         com.google.protobuf.UnknownFieldSet.newBuilder();
     try {
@@ -61708,6 +61580,14 @@ private SecurityCapabilitiesRequest(
           }
           break;
         }
+        case 10: {
+          if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+            serverName_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>();
+            mutable_bitField0_ |= 0x00000001;
+          }
+          serverName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
+          break;
+        }
       }
     }
   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -61716,44 +61596,90 @@ private SecurityCapabilitiesRequest(
     throw new com.google.protobuf.InvalidProtocolBufferException(
         e.getMessage()).setUnfinishedMessage(this);
   } finally {
+    if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+      serverName_ = java.util.Collections.unmodifiableList(serverName_);
+    }
     this.unknownFields = unknownFields.build();
     makeExtensionsImmutable();
   }
 }
 public
static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SecurityCapabilitiesRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ClearDeadServersRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SecurityCapabilitiesRequest(input, extensionRegistry); + return new ClearDeadServersRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } + // repeated .hbase.pb.ServerName server_name = 1; + public static final int SERVER_NAME_FIELD_NUMBER = 1; + private java.util.List serverName_; + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public java.util.List getServerNameList() { + return serverName_; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public java.util.List + getServerNameOrBuilderList() { + return serverName_; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public int getServerNameCount() { + return serverName_.size(); + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { + return serverName_.get(index); + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + int index) { + return serverName_.get(index); + } + private void initFields() { + serverName_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + for (int i = 0; i < getServerNameCount(); i++) { + if (!getServerName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -61761,6 +61687,9 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + for (int i = 0; i < serverName_.size(); i++) { + output.writeMessage(1, 
serverName_.get(i)); + } getUnknownFields().writeTo(output); } @@ -61770,6 +61699,10 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; + for (int i = 0; i < serverName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, serverName_.get(i)); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -61787,12 +61720,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) obj; boolean result = true; + result = result && getServerNameList() + .equals(other.getServerNameList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -61806,58 +61741,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getServerNameCount() > 0) { + hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getServerNameList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -61866,7 +61805,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -61878,24 +61817,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.SecurityCapabilitiesRequest} + * Protobuf type {@code hbase.pb.ClearDeadServersRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -61907,6 +61846,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerNameFieldBuilder(); } } private static Builder create() { @@ -61915,6 +61855,12 @@ private static Builder create() { public Builder clear() { super.clear(); + if (serverNameBuilder_ == null) { + serverName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + serverNameBuilder_.clear(); + } return this; } @@ -61924,43 +61870,85 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest(this); + int from_bitField0_ = bitField0_; + if (serverNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + serverName_ = java.util.Collections.unmodifiableList(serverName_); + bitField0_ = (bitField0_ & ~0x00000001); + } + 
result.serverName_ = serverName_; + } else { + result.serverName_ = serverNameBuilder_.build(); + } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.getDefaultInstance()) return this; + if (serverNameBuilder_ == null) { + if (!other.serverName_.isEmpty()) { + if (serverName_.isEmpty()) { + serverName_ = other.serverName_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureServerNameIsMutable(); + serverName_.addAll(other.serverName_); + } + onChanged(); + } } else { - super.mergeFrom(other); - return this; + if (!other.serverName_.isEmpty()) { + if (serverNameBuilder_.isEmpty()) { + serverNameBuilder_.dispose(); + serverNameBuilder_ = null; + serverName_ = other.serverName_; + bitField0_ = (bitField0_ & ~0x00000001); + serverNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getServerNameFieldBuilder() : null; + } else { + serverNameBuilder_.addAllMessages(other.serverName_); + } + } } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + for (int i = 0; i < getServerNameCount(); i++) { + if (!getServerName(i).isInitialized()) { + + return false; + } + } return true; } @@ -61968,67 +61956,319 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } - return this; + return this; + } + private int bitField0_; + + // repeated .hbase.pb.ServerName server_name = 1; + private java.util.List serverName_ = + java.util.Collections.emptyList(); + private void ensureServerNameIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + serverName_ = new java.util.ArrayList(serverName_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; + + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public java.util.List getServerNameList() { + if (serverNameBuilder_ == null) { + return java.util.Collections.unmodifiableList(serverName_); + } else { + return serverNameBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public int getServerNameCount() { + if (serverNameBuilder_ == null) { + return serverName_.size(); + } else { + return serverNameBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { + if (serverNameBuilder_ == null) { + return serverName_.get(index); + } else { + return serverNameBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder setServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerNameIsMutable(); + serverName_.set(index, value); + onChanged(); + } else { + serverNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder setServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.set(index, builderForValue.build()); + onChanged(); + } else { + serverNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerNameIsMutable(); + serverName_.add(value); + onChanged(); + } else { + serverNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerNameIsMutable(); + serverName_.add(index, value); + onChanged(); + } else { + serverNameBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.add(builderForValue.build()); + onChanged(); + } else { + serverNameBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.add(index, builderForValue.build()); + onChanged(); + } else { + serverNameBuilder_.addMessage(index, 
builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addAllServerName( + java.lang.Iterable values) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + super.addAll(values, serverName_); + onChanged(); + } else { + serverNameBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder clearServerName() { + if (serverNameBuilder_ == null) { + serverName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + serverNameBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder removeServerName(int index) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.remove(index); + onChanged(); + } else { + serverNameBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder( + int index) { + return getServerNameFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + int index) { + if (serverNameBuilder_ == null) { + return serverName_.get(index); } else { + return serverNameBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public java.util.List + getServerNameOrBuilderList() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(serverName_); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() { + return getServerNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder( + int index) { + return getServerNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public java.util.List + getServerNameBuilderList() { + return getServerNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + serverName_ = null; + } + return serverNameBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.SecurityCapabilitiesRequest) + // 
@@protoc_insertion_point(builder_scope:hbase.pb.ClearDeadServersRequest) } static { - defaultInstance = new SecurityCapabilitiesRequest(true); + defaultInstance = new ClearDeadServersRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.ClearDeadServersRequest) } - public interface SecurityCapabilitiesResponseOrBuilder + public interface ClearDeadServersResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + // repeated .hbase.pb.ServerName server_name = 1; /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - java.util.List getCapabilitiesList(); + java.util.List + getServerNameList(); /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - int getCapabilitiesCount(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index); /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index); + int getServerNameCount(); + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + java.util.List + getServerNameOrBuilderList(); + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + int index); } /** - * Protobuf type {@code hbase.pb.SecurityCapabilitiesResponse} + * Protobuf type {@code hbase.pb.ClearDeadServersResponse} */ - public static final class SecurityCapabilitiesResponse extends + public static final class ClearDeadServersResponse extends com.google.protobuf.GeneratedMessage - implements SecurityCapabilitiesResponseOrBuilder { - // Use SecurityCapabilitiesResponse.newBuilder() to construct. - private SecurityCapabilitiesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ClearDeadServersResponseOrBuilder { + // Use ClearDeadServersResponse.newBuilder() to construct. 
+ private ClearDeadServersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SecurityCapabilitiesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ClearDeadServersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SecurityCapabilitiesResponse defaultInstance; - public static SecurityCapabilitiesResponse getDefaultInstance() { + private static final ClearDeadServersResponse defaultInstance; + public static ClearDeadServersResponse getDefaultInstance() { return defaultInstance; } - public SecurityCapabilitiesResponse getDefaultInstanceForType() { + public ClearDeadServersResponse getDefaultInstanceForType() { return defaultInstance; } @@ -62038,7 +62278,7 @@ public SecurityCapabilitiesResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private SecurityCapabilitiesResponse( + private ClearDeadServersResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -62061,37 +62301,12 @@ private SecurityCapabilitiesResponse( } break; } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - capabilities_.add(value); - } - break; - } case 10: { - int length = input.readRawVarint32(); - int oldLimit = input.pushLimit(length); - while(input.getBytesUntilLimit() > 0) { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - capabilities_.add(value); - } + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + serverName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; } - input.popLimit(oldLimit); + serverName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); break; } } @@ -62103,7 +62318,7 @@ private SecurityCapabilitiesResponse( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = java.util.Collections.unmodifiableList(capabilities_); + serverName_ = java.util.Collections.unmodifiableList(serverName_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -62111,170 +62326,81 @@ private SecurityCapabilitiesResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SecurityCapabilitiesResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ClearDeadServersResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SecurityCapabilitiesResponse(input, extensionRegistry); + return new ClearDeadServersResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } + // repeated .hbase.pb.ServerName server_name = 1; + public static final int SERVER_NAME_FIELD_NUMBER = 1; + private java.util.List serverName_; /** - * Protobuf enum {@code hbase.pb.SecurityCapabilitiesResponse.Capability} + * repeated .hbase.pb.ServerName server_name = 1; */ - public enum Capability - implements com.google.protobuf.ProtocolMessageEnum { - /** - * SIMPLE_AUTHENTICATION = 0; - */ - SIMPLE_AUTHENTICATION(0, 0), - /** - * SECURE_AUTHENTICATION = 1; - */ - SECURE_AUTHENTICATION(1, 1), - /** - * AUTHORIZATION = 2; - */ - AUTHORIZATION(2, 2), - /** - * CELL_AUTHORIZATION = 3; - */ - CELL_AUTHORIZATION(3, 3), - /** - * CELL_VISIBILITY = 4; - */ - CELL_VISIBILITY(4, 4), - ; - - /** - * SIMPLE_AUTHENTICATION = 0; - */ - public static final int SIMPLE_AUTHENTICATION_VALUE = 0; - /** - * SECURE_AUTHENTICATION = 1; - */ - public static final int SECURE_AUTHENTICATION_VALUE = 1; - /** - * AUTHORIZATION = 2; - */ - public static final int AUTHORIZATION_VALUE = 2; - /** - * CELL_AUTHORIZATION = 3; - */ - public static final int CELL_AUTHORIZATION_VALUE = 3; - /** - * CELL_VISIBILITY = 4; - */ - public static final int CELL_VISIBILITY_VALUE = 4; - - - public final int getNumber() { return value; } - - public static Capability valueOf(int value) { - switch (value) { - case 0: return SIMPLE_AUTHENTICATION; - case 1: return SECURE_AUTHENTICATION; - case 2: return AUTHORIZATION; - case 3: return CELL_AUTHORIZATION; - case 4: return CELL_VISIBILITY; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Capability findValueByNumber(int number) { - return Capability.valueOf(number); - } - }; - - public final 
com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDescriptor().getEnumTypes().get(0); - } - - private static final Capability[] VALUES = values(); - - public static Capability valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private Capability(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:hbase.pb.SecurityCapabilitiesResponse.Capability) + public java.util.List getServerNameList() { + return serverName_; } - - // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - public static final int CAPABILITIES_FIELD_NUMBER = 1; - private java.util.List capabilities_; /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public java.util.List getCapabilitiesList() { - return capabilities_; + public java.util.List + getServerNameOrBuilderList() { + return serverName_; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public int getServerNameCount() { + return serverName_.size(); } /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public int getCapabilitiesCount() { - return capabilities_.size(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { + return serverName_.get(index); } /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { - return capabilities_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + int index) { + return serverName_.get(index); } private void initFields() { - capabilities_ = java.util.Collections.emptyList(); + serverName_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + for (int i = 0; i < getServerNameCount(); i++) { + if (!getServerName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -62282,8 +62408,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < capabilities_.size(); i++) { - output.writeEnum(1, capabilities_.get(i).getNumber()); + for (int i = 0; i < serverName_.size(); i++) { + output.writeMessage(1, serverName_.get(i)); } getUnknownFields().writeTo(output); } @@ -62294,14 +62420,9 @@ public int getSerializedSize() { if (size != -1) return size; size 
= 0; - { - int dataSize = 0; - for (int i = 0; i < capabilities_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeEnumSizeNoTag(capabilities_.get(i).getNumber()); - } - size += dataSize; - size += 1 * capabilities_.size(); + for (int i = 0; i < serverName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, serverName_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -62320,14 +62441,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) obj; boolean result = true; - result = result && getCapabilitiesList() - .equals(other.getCapabilitiesList()); + result = result && getServerNameList() + .equals(other.getServerNameList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -62341,62 +62462,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getCapabilitiesCount() > 0) { - hash = (37 * hash) + CAPABILITIES_FIELD_NUMBER; - hash = (53 * hash) + hashEnumList(getCapabilitiesList()); + if (getServerNameCount() > 0) { + hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getServerNameList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -62405,7 +62526,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCa public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -62417,24 +62538,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.SecurityCapabilitiesResponse} + * Protobuf type {@code hbase.pb.ClearDeadServersResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -62446,6 +62567,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerNameFieldBuilder(); } } private static Builder create() { @@ -62454,8 +62576,12 @@ private static Builder create() { public Builder clear() { super.clear(); - capabilities_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); + if (serverNameBuilder_ == null) { + serverName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + serverNameBuilder_.clear(); + } return this; } @@ -62465,211 +62591,400 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse(this); + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse(this); int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = java.util.Collections.unmodifiableList(capabilities_); - bitField0_ = (bitField0_ & ~0x00000001); + if (serverNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + serverName_ = java.util.Collections.unmodifiableList(serverName_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.serverName_ = serverName_; + } else { + result.serverName_ = serverNameBuilder_.build(); } - result.capabilities_ = capabilities_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance()) return this; + if (serverNameBuilder_ == null) { + if (!other.serverName_.isEmpty()) { + if (serverName_.isEmpty()) { + serverName_ = other.serverName_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureServerNameIsMutable(); + serverName_.addAll(other.serverName_); + } + onChanged(); + } + } else { + if (!other.serverName_.isEmpty()) { + if (serverNameBuilder_.isEmpty()) { + serverNameBuilder_.dispose(); + serverNameBuilder_ = null; + serverName_ = other.serverName_; + bitField0_ = (bitField0_ & ~0x00000001); + serverNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getServerNameFieldBuilder() : null; + } else { + serverNameBuilder_.addAllMessages(other.serverName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getServerNameCount(); i++) { + if (!getServerName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.ServerName server_name = 1; + private java.util.List serverName_ = + java.util.Collections.emptyList(); + private void ensureServerNameIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + serverName_ = new java.util.ArrayList(serverName_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; + + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public java.util.List getServerNameList() { + if (serverNameBuilder_ == null) { + return java.util.Collections.unmodifiableList(serverName_); + } else { + return serverNameBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public int getServerNameCount() { + if (serverNameBuilder_ == null) { + return serverName_.size(); + } else { + return serverNameBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { + if (serverNameBuilder_ == null) { + return serverName_.get(index); + } else { + return serverNameBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder setServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerNameIsMutable(); + serverName_.set(index, value); + onChanged(); + } else { + serverNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder setServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.set(index, builderForValue.build()); + onChanged(); } else { - super.mergeFrom(other); - return this; + serverNameBuilder_.setMessage(index, builderForValue.build()); } + return this; } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other) { - if (other == 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()) return this; - if (!other.capabilities_.isEmpty()) { - if (capabilities_.isEmpty()) { - capabilities_ = other.capabilities_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureCapabilitiesIsMutable(); - capabilities_.addAll(other.capabilities_); + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureServerNameIsMutable(); + serverName_.add(value); onChanged(); + } else { + serverNameBuilder_.addMessage(value); } - this.mergeUnknownFields(other.getUnknownFields()); return this; } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureServerNameIsMutable(); + serverName_.add(index, value); + onChanged(); + } else { + serverNameBuilder_.addMessage(index, value); } return this; } - private int bitField0_; - - // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - private java.util.List capabilities_ = - java.util.Collections.emptyList(); - private void ensureCapabilitiesIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = new java.util.ArrayList(capabilities_); - bitField0_ |= 0x00000001; + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public Builder addServerName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.add(builderForValue.build()); + onChanged(); + } else { + serverNameBuilder_.addMessage(builderForValue.build()); } + return this; } /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public java.util.List getCapabilitiesList() { - return java.util.Collections.unmodifiableList(capabilities_); + public Builder addServerName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.add(index, builderForValue.build()); + onChanged(); + } else { + serverNameBuilder_.addMessage(index, builderForValue.build()); + } + return this; } /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public int getCapabilitiesCount() { - return 
capabilities_.size(); + public Builder addAllServerName( + java.lang.Iterable values) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + super.addAll(values, serverName_); + onChanged(); + } else { + serverNameBuilder_.addAllMessages(values); + } + return this; } /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { - return capabilities_.get(index); + public Builder clearServerName() { + if (serverNameBuilder_ == null) { + serverName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + serverNameBuilder_.clear(); + } + return this; } /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder setCapabilities( - int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { - if (value == null) { - throw new NullPointerException(); + public Builder removeServerName(int index) { + if (serverNameBuilder_ == null) { + ensureServerNameIsMutable(); + serverName_.remove(index); + onChanged(); + } else { + serverNameBuilder_.remove(index); } - ensureCapabilitiesIsMutable(); - capabilities_.set(index, value); - onChanged(); return this; } /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder addCapabilities(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { - if (value == null) { - throw new NullPointerException(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder( + int index) { + return getServerNameFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( + int index) { + if (serverNameBuilder_ == null) { + return serverName_.get(index); } else { + return serverNameBuilder_.getMessageOrBuilder(index); } - ensureCapabilitiesIsMutable(); - capabilities_.add(value); - onChanged(); - return this; } /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder addAllCapabilities( - java.lang.Iterable values) { - ensureCapabilitiesIsMutable(); - super.addAll(values, capabilities_); - onChanged(); - return this; + public java.util.List + getServerNameOrBuilderList() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(serverName_); + } } /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + * repeated .hbase.pb.ServerName server_name = 1; */ - public Builder clearCapabilities() { - capabilities_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() { + return getServerNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName 
server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder( + int index) { + return getServerNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName server_name = 1; + */ + public java.util.List + getServerNameBuilderList() { + return getServerNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + serverName_ = null; + } + return serverNameBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.SecurityCapabilitiesResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.ClearDeadServersResponse) } static { - defaultInstance = new SecurityCapabilitiesResponse(true); + defaultInstance = new ClearDeadServersResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.ClearDeadServersResponse) } - public interface ClearDeadServersRequestOrBuilder + public interface SetSnapshotCleanupRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .hbase.pb.ServerName server_name = 1; - /** - * repeated .hbase.pb.ServerName server_name = 1; - */ - java.util.List - getServerNameList(); + // required bool enabled = 1; /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool enabled = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index); + boolean hasEnabled(); /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool enabled = 1; */ - int getServerNameCount(); + boolean getEnabled(); + + // optional bool synchronous = 2; /** - * repeated .hbase.pb.ServerName server_name = 1; + * optional bool synchronous = 2; */ - java.util.List - getServerNameOrBuilderList(); + boolean hasSynchronous(); /** - * repeated .hbase.pb.ServerName server_name = 1; + * optional bool synchronous = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( - int index); + boolean getSynchronous(); } /** - * Protobuf type {@code hbase.pb.ClearDeadServersRequest} + * Protobuf type {@code hbase.pb.SetSnapshotCleanupRequest} */ - public static final class ClearDeadServersRequest extends - com.google.protobuf.GeneratedMessage - implements ClearDeadServersRequestOrBuilder { - // Use ClearDeadServersRequest.newBuilder() to construct. - private ClearDeadServersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + public static final class SetSnapshotCleanupRequest extends + com.google.protobuf.GeneratedMessage + implements SetSnapshotCleanupRequestOrBuilder { + // Use SetSnapshotCleanupRequest.newBuilder() to construct. 
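  // A minimal usage sketch (illustrative only, not part of the generated
  // file): round-tripping the ClearDeadServersResponse defined above,
  // assuming the protobuf 2.x generated API and hypothetical server values.
  //
  //   HBaseProtos.ServerName dead = HBaseProtos.ServerName.newBuilder()
  //       .setHostName("rs1.example.com")  // hypothetical host; required field
  //       .setPort(16020)
  //       .setStartCode(1624600000000L)
  //       .build();
  //   ClearDeadServersResponse resp = ClearDeadServersResponse.newBuilder()
  //       .addServerName(dead)   // repeated message field, number 1
  //       .build();              // isInitialized() checks each ServerName
  //   ClearDeadServersResponse parsed =
  //       ClearDeadServersResponse.parseFrom(resp.toByteString());
  //
  // On the wire each entry is length-delimited (the parser's "case 10"
  // above), replacing the packed/varint enum encoding of the class that
  // previously occupied this position in the generated file.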
+ private SetSnapshotCleanupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ClearDeadServersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private SetSnapshotCleanupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ClearDeadServersRequest defaultInstance; - public static ClearDeadServersRequest getDefaultInstance() { + private static final SetSnapshotCleanupRequest defaultInstance; + public static SetSnapshotCleanupRequest getDefaultInstance() { return defaultInstance; } - public ClearDeadServersRequest getDefaultInstanceForType() { + public SetSnapshotCleanupRequest getDefaultInstanceForType() { return defaultInstance; } @@ -62679,7 +62994,7 @@ public ClearDeadServersRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private ClearDeadServersRequest( + private SetSnapshotCleanupRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -62702,12 +63017,14 @@ private ClearDeadServersRequest( } break; } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - serverName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); + case 8: { + bitField0_ |= 0x00000001; + enabled_ = input.readBool(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + synchronous_ = input.readBool(); break; } } @@ -62718,89 +63035,82 @@ private ClearDeadServersRequest( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = java.util.Collections.unmodifiableList(serverName_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ClearDeadServersRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SetSnapshotCleanupRequest 
parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ClearDeadServersRequest(input, extensionRegistry); + return new SetSnapshotCleanupRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - // repeated .hbase.pb.ServerName server_name = 1; - public static final int SERVER_NAME_FIELD_NUMBER = 1; - private java.util.List serverName_; - /** - * repeated .hbase.pb.ServerName server_name = 1; - */ - public java.util.List getServerNameList() { - return serverName_; - } + private int bitField0_; + // required bool enabled = 1; + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_; /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool enabled = 1; */ - public java.util.List - getServerNameOrBuilderList() { - return serverName_; + public boolean hasEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * required bool enabled = 1; */ - public int getServerNameCount() { - return serverName_.size(); + public boolean getEnabled() { + return enabled_; } + + // optional bool synchronous = 2; + public static final int SYNCHRONOUS_FIELD_NUMBER = 2; + private boolean synchronous_; /** - * repeated .hbase.pb.ServerName server_name = 1; + * optional bool synchronous = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { - return serverName_.get(index); + public boolean hasSynchronous() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * repeated .hbase.pb.ServerName server_name = 1; + * optional bool synchronous = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder( - int index) { - return serverName_.get(index); + public boolean getSynchronous() { + return synchronous_; } private void initFields() { - serverName_ = java.util.Collections.emptyList(); + enabled_ = false; + synchronous_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getServerNameCount(); i++) { - if (!getServerName(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasEnabled()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -62809,8 +63119,11 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < serverName_.size(); i++) { - output.writeMessage(1, serverName_.get(i)); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, enabled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, synchronous_); } getUnknownFields().writeTo(output); } @@ -62821,9 +63134,13 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - for (int i = 0; i < serverName_.size(); i++) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, serverName_.get(i)); + .computeBoolSize(1, enabled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += 
com.google.protobuf.CodedOutputStream + .computeBoolSize(2, synchronous_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -62842,14 +63159,22 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) obj; boolean result = true; - result = result && getServerNameList() - .equals(other.getServerNameList()); + result = result && (hasEnabled() == other.hasEnabled()); + if (hasEnabled()) { + result = result && (getEnabled() + == other.getEnabled()); + } + result = result && (hasSynchronous() == other.hasSynchronous()); + if (hasSynchronous()) { + result = result && (getSynchronous() + == other.getSynchronous()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -62863,62 +63188,66 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getServerNameCount() > 0) { - hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; - hash = (53 * hash) + getServerNameList().hashCode(); + if (hasEnabled()) { + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getEnabled()); + } + if (hasSynchronous()) { + hash = (37 * hash) + SYNCHRONOUS_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getSynchronous()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -62927,7 +63256,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadS public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -62939,24 +63268,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.ClearDeadServersRequest} + * Protobuf type {@code hbase.pb.SetSnapshotCleanupRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -62968,7 +63297,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getServerNameFieldBuilder(); } } private static Builder create() { @@ -62977,12 +63305,10 @@ private static Builder create() { public Builder clear() { super.clear(); - if (serverNameBuilder_ == null) { - serverName_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - serverNameBuilder_.clear(); - } + enabled_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + synchronous_ = false; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -62992,84 +63318,63 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest(this); + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest(this); int from_bitField0_ = bitField0_; - if (serverNameBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = java.util.Collections.unmodifiableList(serverName_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.serverName_ = serverName_; - } else { - result.serverName_ = serverNameBuilder_.build(); + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest)other); - } else { - super.mergeFrom(other); - return this; + result.enabled_ = enabled_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.getDefaultInstance()) return this; - if (serverNameBuilder_ == null) { - if (!other.serverName_.isEmpty()) { - if (serverName_.isEmpty()) { - serverName_ = other.serverName_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureServerNameIsMutable(); - serverName_.addAll(other.serverName_); - } - onChanged(); - } - } else { - if (!other.serverName_.isEmpty()) { - if (serverNameBuilder_.isEmpty()) { - serverNameBuilder_.dispose(); - serverNameBuilder_ = null; - serverName_ = other.serverName_; - bitField0_ = (bitField0_ & ~0x00000001); - serverNameBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getServerNameFieldBuilder() : null; - } else { - serverNameBuilder_.addAllMessages(other.serverName_); - } - } + result.synchronous_ = synchronous_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance()) return this; + if (other.hasEnabled()) { + setEnabled(other.getEnabled()); + } + if (other.hasSynchronous()) { + setSynchronous(other.getSynchronous()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - for (int i = 0; i < getServerNameCount(); i++) { - if (!getServerName(i).isInitialized()) { - - return false; - } + if (!hasEnabled()) { + + return false; } return true; } @@ -63078,11 +63383,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -63093,304 +63398,115 @@ public Builder mergeFrom( } private int bitField0_; - // repeated .hbase.pb.ServerName server_name = 1; - private java.util.List serverName_ = - java.util.Collections.emptyList(); - private void ensureServerNameIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - serverName_ = new java.util.ArrayList(serverName_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; - - /** - * repeated .hbase.pb.ServerName server_name = 1; - */ - public java.util.List getServerNameList() { - if (serverNameBuilder_ == null) { - return java.util.Collections.unmodifiableList(serverName_); - } else { - return serverNameBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.ServerName server_name = 1; - */ - public int getServerNameCount() { - if (serverNameBuilder_ == null) { - return serverName_.size(); - } else { - return serverNameBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.ServerName server_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) { - if (serverNameBuilder_ == null) { - return serverName_.get(index); - } else { - return serverNameBuilder_.getMessage(index); - 
       }
-    }
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public Builder setServerName(
-        int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-      if (serverNameBuilder_ == null) {
-        if (value == null) {
-          throw new NullPointerException();
-        }
-        ensureServerNameIsMutable();
-        serverName_.set(index, value);
-        onChanged();
-      } else {
-        serverNameBuilder_.setMessage(index, value);
-      }
-      return this;
-    }
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public Builder setServerName(
-        int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-      if (serverNameBuilder_ == null) {
-        ensureServerNameIsMutable();
-        serverName_.set(index, builderForValue.build());
-        onChanged();
-      } else {
-        serverNameBuilder_.setMessage(index, builderForValue.build());
-      }
-      return this;
-    }
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public Builder addServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-      if (serverNameBuilder_ == null) {
-        if (value == null) {
-          throw new NullPointerException();
-        }
-        ensureServerNameIsMutable();
-        serverName_.add(value);
-        onChanged();
-      } else {
-        serverNameBuilder_.addMessage(value);
-      }
-      return this;
-    }
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public Builder addServerName(
-        int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-      if (serverNameBuilder_ == null) {
-        if (value == null) {
-          throw new NullPointerException();
-        }
-        ensureServerNameIsMutable();
-        serverName_.add(index, value);
-        onChanged();
-      } else {
-        serverNameBuilder_.addMessage(index, value);
-      }
-      return this;
-    }
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public Builder addServerName(
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-      if (serverNameBuilder_ == null) {
-        ensureServerNameIsMutable();
-        serverName_.add(builderForValue.build());
-        onChanged();
-      } else {
-        serverNameBuilder_.addMessage(builderForValue.build());
-      }
-      return this;
-    }
+    // required bool enabled = 1;
+    private boolean enabled_ ;
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * required bool enabled = 1;
      */
-    public Builder addServerName(
-        int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-      if (serverNameBuilder_ == null) {
-        ensureServerNameIsMutable();
-        serverName_.add(index, builderForValue.build());
-        onChanged();
-      } else {
-        serverNameBuilder_.addMessage(index, builderForValue.build());
-      }
-      return this;
+    public boolean hasEnabled() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * required bool enabled = 1;
      */
-    public Builder addAllServerName(
-        java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
-      if (serverNameBuilder_ == null) {
-        ensureServerNameIsMutable();
-        super.addAll(values, serverName_);
-        onChanged();
-      } else {
-        serverNameBuilder_.addAllMessages(values);
-      }
-      return this;
+    public boolean getEnabled() {
+      return enabled_;
     }
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * required bool enabled = 1;
      */
-    public Builder clearServerName() {
-      if (serverNameBuilder_ == null) {
-        serverName_ = java.util.Collections.emptyList();
-        bitField0_ = (bitField0_ & ~0x00000001);
-        onChanged();
-      } else {
-        serverNameBuilder_.clear();
-      }
+    public Builder setEnabled(boolean value) {
+      bitField0_ |= 0x00000001;
+      enabled_ = value;
+      onChanged();
       return this;
     }
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * required bool enabled = 1;
      */
-    public Builder removeServerName(int index) {
-      if (serverNameBuilder_ == null) {
-        ensureServerNameIsMutable();
-        serverName_.remove(index);
-        onChanged();
-      } else {
-        serverNameBuilder_.remove(index);
-      }
+    public Builder clearEnabled() {
+      bitField0_ = (bitField0_ & ~0x00000001);
+      enabled_ = false;
+      onChanged();
       return this;
     }
+
+    // optional bool synchronous = 2;
+    private boolean synchronous_ ;
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder(
-        int index) {
-      return getServerNameFieldBuilder().getBuilder(index);
-    }
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
-        int index) {
-      if (serverNameBuilder_ == null) {
-        return serverName_.get(index);  } else {
-        return serverNameBuilder_.getMessageOrBuilder(index);
-      }
-    }
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
-         getServerNameOrBuilderList() {
-      if (serverNameBuilder_ != null) {
-        return serverNameBuilder_.getMessageOrBuilderList();
-      } else {
-        return java.util.Collections.unmodifiableList(serverName_);
-      }
-    }
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * optional bool synchronous = 2;
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() {
-      return getServerNameFieldBuilder().addBuilder(
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+    public boolean hasSynchronous() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * optional bool synchronous = 2;
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder(
-        int index) {
-      return getServerNameFieldBuilder().addBuilder(
-          index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+    public boolean getSynchronous() {
+      return synchronous_;
     }
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * optional bool synchronous = 2;
      */
-    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
-         getServerNameBuilderList() {
-      return getServerNameFieldBuilder().getBuilderList();
+    public Builder setSynchronous(boolean value) {
+      bitField0_ |= 0x00000002;
+      synchronous_ = value;
+      onChanged();
+      return this;
     }
-    private com.google.protobuf.RepeatedFieldBuilder<
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
-        getServerNameFieldBuilder() {
-      if (serverNameBuilder_ == null) {
-        serverNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
-            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
-                serverName_,
-                ((bitField0_ & 0x00000001) == 0x00000001),
-                getParentForChildren(),
-                isClean());
-        serverName_ = null;
-      }
-      return serverNameBuilder_;
+    /**
+     * optional bool synchronous = 2;
+     */
+    public Builder clearSynchronous() {
+      bitField0_ = (bitField0_ & ~0x00000002);
+      synchronous_ = false;
+      onChanged();
+      return this;
     }

-    // @@protoc_insertion_point(builder_scope:hbase.pb.ClearDeadServersRequest)
+    // @@protoc_insertion_point(builder_scope:hbase.pb.SetSnapshotCleanupRequest)
   }

   static {
-    defaultInstance = new ClearDeadServersRequest(true);
+    defaultInstance = new SetSnapshotCleanupRequest(true);
     defaultInstance.initFields();
   }

-  // @@protoc_insertion_point(class_scope:hbase.pb.ClearDeadServersRequest)
+  // @@protoc_insertion_point(class_scope:hbase.pb.SetSnapshotCleanupRequest)
 }
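Aside for reviewers: a minimal sketch of driving the SetSnapshotCleanupRequest builder regenerated above. It uses only calls visible in this hunk (newBuilder, setEnabled, setSynchronous, build, and the generated parseFrom/toByteArray round trip); the class and variable names are illustrative assumptions, not code from this patch.

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;

    public class SetSnapshotCleanupRequestSketch {
      public static void main(String[] args) throws Exception {
        // enabled is a required field; build() throws if it is never set.
        SetSnapshotCleanupRequest req = SetSnapshotCleanupRequest.newBuilder()
            .setEnabled(true)       // required bool enabled = 1
            .setSynchronous(true)   // optional bool synchronous = 2
            .build();
        // Round-trip through the wire format, as the RPC layer would.
        SetSnapshotCleanupRequest parsed =
            SetSnapshotCleanupRequest.parseFrom(req.toByteArray());
        System.out.println(parsed.getEnabled() + " " + parsed.getSynchronous());
      }
    }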
-  public interface ClearDeadServersResponseOrBuilder
+  public interface SetSnapshotCleanupResponseOrBuilder
     extends com.google.protobuf.MessageOrBuilder {

-    // repeated .hbase.pb.ServerName server_name = 1;
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> 
-        getServerNameList();
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index);
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    int getServerNameCount();
+    // required bool prev_snapshot_cleanup = 1;
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * required bool prev_snapshot_cleanup = 1;
      */
-    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
-        getServerNameOrBuilderList();
+    boolean hasPrevSnapshotCleanup();
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * required bool prev_snapshot_cleanup = 1;
      */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
-        int index);
+    boolean getPrevSnapshotCleanup();
   }
   /**
-   * Protobuf type {@code hbase.pb.ClearDeadServersResponse}
+   * Protobuf type {@code hbase.pb.SetSnapshotCleanupResponse}
    */
-  public static final class ClearDeadServersResponse extends
+  public static final class SetSnapshotCleanupResponse extends
       com.google.protobuf.GeneratedMessage
-      implements ClearDeadServersResponseOrBuilder {
-    // Use ClearDeadServersResponse.newBuilder() to construct.
-    private ClearDeadServersResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements SetSnapshotCleanupResponseOrBuilder {
+    // Use SetSnapshotCleanupResponse.newBuilder() to construct.
+    private SetSnapshotCleanupResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private ClearDeadServersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private SetSnapshotCleanupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

-    private static final ClearDeadServersResponse defaultInstance;
-    public static ClearDeadServersResponse getDefaultInstance() {
+    private static final SetSnapshotCleanupResponse defaultInstance;
+    public static SetSnapshotCleanupResponse getDefaultInstance() {
       return defaultInstance;
     }

-    public ClearDeadServersResponse getDefaultInstanceForType() {
+    public SetSnapshotCleanupResponse getDefaultInstanceForType() {
       return defaultInstance;
     }

@@ -63400,7 +63516,7 @@ public ClearDeadServersResponse getDefaultInstanceForType() {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private ClearDeadServersResponse(
+    private SetSnapshotCleanupResponse(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -63423,12 +63539,9 @@ private ClearDeadServersResponse(
             }
             break;
           }
-          case 10: {
-            if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-              serverName_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>();
-              mutable_bitField0_ |= 0x00000001;
-            }
-            serverName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
+          case 8: {
+            bitField0_ |= 0x00000001;
+            prevSnapshotCleanup_ = input.readBool();
             break;
           }
         }
@@ -63439,89 +63552,65 @@ private ClearDeadServersResponse(
       throw new com.google.protobuf.InvalidProtocolBufferException(
           e.getMessage()).setUnfinishedMessage(this);
     } finally {
-      if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-        serverName_ = java.util.Collections.unmodifiableList(serverName_);
-      }
       this.unknownFields = unknownFields.build();
       makeExtensionsImmutable();
     }
   }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor;
     }

     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.Builder.class);
     }

-    public static com.google.protobuf.Parser<ClearDeadServersResponse> PARSER =
-        new com.google.protobuf.AbstractParser<ClearDeadServersResponse>() {
-      public ClearDeadServersResponse parsePartialFrom(
+    public static com.google.protobuf.Parser<SetSnapshotCleanupResponse> PARSER =
+        new com.google.protobuf.AbstractParser<SetSnapshotCleanupResponse>() {
+      public SetSnapshotCleanupResponse parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ClearDeadServersResponse(input, extensionRegistry);
+        return new SetSnapshotCleanupResponse(input, extensionRegistry);
       }
     };

     @java.lang.Override
-    public com.google.protobuf.Parser<ClearDeadServersResponse> getParserForType() {
+    public com.google.protobuf.Parser<SetSnapshotCleanupResponse> getParserForType() {
       return PARSER;
     }

-    // repeated .hbase.pb.ServerName server_name = 1;
-    public static final int SERVER_NAME_FIELD_NUMBER = 1;
-    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> serverName_;
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServerNameList() {
-      return serverName_;
-    }
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
-        getServerNameOrBuilderList() {
-      return serverName_;
-    }
-    /**
-     * repeated .hbase.pb.ServerName server_name = 1;
-     */
-    public int getServerNameCount() {
-      return serverName_.size();
-    }
+    private int bitField0_;
+    // required bool prev_snapshot_cleanup = 1;
+    public static final int PREV_SNAPSHOT_CLEANUP_FIELD_NUMBER = 1;
+    private boolean prevSnapshotCleanup_;
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * required bool prev_snapshot_cleanup = 1;
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) {
-      return serverName_.get(index);
+    public boolean hasPrevSnapshotCleanup() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * repeated .hbase.pb.ServerName server_name = 1;
+     * required bool prev_snapshot_cleanup = 1;
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
-        int index) {
-      return serverName_.get(index);
+    public boolean getPrevSnapshotCleanup() {
+      return prevSnapshotCleanup_;
     }

     private void initFields() {
-      serverName_ = java.util.Collections.emptyList();
+      prevSnapshotCleanup_ = false;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;

-      for (int i = 0; i < getServerNameCount(); i++) {
-        if (!getServerName(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
+      if (!hasPrevSnapshotCleanup()) {
+        memoizedIsInitialized = 0;
+        return false;
       }
       memoizedIsInitialized = 1;
       return true;
@@ -63530,8 +63619,8 @@ public final boolean isInitialized() {
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
-      for (int i = 0; i < serverName_.size(); i++) {
-        output.writeMessage(1, serverName_.get(i));
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBool(1, prevSnapshotCleanup_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -63542,9 +63631,9 @@ public int getSerializedSize() {
       if (size != -1) return size;

       size = 0;
-      for (int i = 0; i < serverName_.size(); i++) {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, serverName_.get(i));
+          .computeBoolSize(1, prevSnapshotCleanup_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -63563,14 +63652,17 @@ public boolean equals(final java.lang.Object obj) {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) obj;
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) obj;

       boolean result = true;
-      result = result && getServerNameList()
-          .equals(other.getServerNameList());
+      result = result && (hasPrevSnapshotCleanup() == other.hasPrevSnapshotCleanup());
+      if (hasPrevSnapshotCleanup()) {
+        result = result && (getPrevSnapshotCleanup()
+            == other.getPrevSnapshotCleanup());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -63584,62 +63676,62 @@ public int hashCode() {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (getServerNameCount() > 0) {
-        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getServerNameList().hashCode();
+      if (hasPrevSnapshotCleanup()) {
+        hash = (37 * hash) + PREV_SNAPSHOT_CLEANUP_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getPrevSnapshotCleanup());
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
     }

-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
@@ -63648,7 +63740,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadS
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -63660,24 +63752,24 @@ protected Builder newBuilderForType(
       return builder;
     }
     /**
-     * Protobuf type {@code hbase.pb.ClearDeadServersResponse}
+     * Protobuf type {@code hbase.pb.SetSnapshotCleanupResponse}
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponseOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponseOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor;
       }

       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.Builder.class);
       }

-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -63689,7 +63781,6 @@ private Builder(
       }
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getServerNameFieldBuilder();
         }
       }
       private static Builder create() {
@@ -63698,12 +63789,8 @@ private static Builder create() {
       public Builder clear() {
         super.clear();
-        if (serverNameBuilder_ == null) {
-          serverName_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-        } else {
-          serverNameBuilder_.clear();
-        }
+        prevSnapshotCleanup_ = false;
+        bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
@@ -63713,84 +63800,56 @@ public Builder clone() {
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ClearDeadServersResponse_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor;
       }

-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance();
       }

-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse(this);
-        int from_bitField0_ = bitField0_;
-        if (serverNameBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001)) {
-            serverName_ = java.util.Collections.unmodifiableList(serverName_);
-            bitField0_ = (bitField0_ & ~0x00000001);
-          }
-          result.serverName_ = serverName_;
-        } else {
-          result.serverName_ = serverNameBuilder_.build();
-        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.prevSnapshotCleanup_ = prevSnapshotCleanup_;
+        result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }

       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }

-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance()) return this;
-        if (serverNameBuilder_ == null) {
-          if (!other.serverName_.isEmpty()) {
-            if (serverName_.isEmpty()) {
-              serverName_ = other.serverName_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-            } else {
-              ensureServerNameIsMutable();
-              serverName_.addAll(other.serverName_);
-            }
-            onChanged();
-          }
-        } else {
-          if (!other.serverName_.isEmpty()) {
-            if (serverNameBuilder_.isEmpty()) {
-              serverNameBuilder_.dispose();
-              serverNameBuilder_ = null;
-              serverName_ = other.serverName_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-              serverNameBuilder_ = 
-                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
-                   getServerNameFieldBuilder() : null;
-            } else {
-              serverNameBuilder_.addAllMessages(other.serverName_);
-            }
-          }
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance()) return this;
+        if (other.hasPrevSnapshotCleanup()) {
+          setPrevSnapshotCleanup(other.getPrevSnapshotCleanup());
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }

       public final boolean isInitialized() {
-        for (int i = 0; i < getServerNameCount(); i++) {
-          if (!getServerName(i).isInitialized()) {
-
-            return false;
-          }
+        if (!hasPrevSnapshotCleanup()) {
+
+          return false;
         }
         return true;
       }
@@ -63799,314 +63858,87 @@ public Builder mergeFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      // repeated .hbase.pb.ServerName server_name = 1;
-      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> serverName_ =
-        java.util.Collections.emptyList();
-      private void ensureServerNameIsMutable() {
-        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
-          serverName_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(serverName_);
-          bitField0_ |= 0x00000001;
-        }
-      }
-
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
-
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServerNameList() {
-        if (serverNameBuilder_ == null) {
-          return java.util.Collections.unmodifiableList(serverName_);
-        } else {
-          return serverNameBuilder_.getMessageList();
-        }
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public int getServerNameCount() {
-        if (serverNameBuilder_ == null) {
-          return serverName_.size();
-        } else {
-          return serverNameBuilder_.getCount();
-        }
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) {
-        if (serverNameBuilder_ == null) {
-          return serverName_.get(index);
-        } else {
-          return serverNameBuilder_.getMessage(index);
-        }
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public Builder setServerName(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (serverNameBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureServerNameIsMutable();
-          serverName_.set(index, value);
-          onChanged();
-        } else {
-          serverNameBuilder_.setMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public Builder setServerName(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-        if (serverNameBuilder_ == null) {
-          ensureServerNameIsMutable();
-          serverName_.set(index, builderForValue.build());
-          onChanged();
-        } else {
-          serverNameBuilder_.setMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public Builder addServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (serverNameBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureServerNameIsMutable();
-          serverName_.add(value);
-          onChanged();
-        } else {
-          serverNameBuilder_.addMessage(value);
-        }
-        return this;
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public Builder addServerName(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (serverNameBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureServerNameIsMutable();
-          serverName_.add(index, value);
-          onChanged();
-        } else {
-          serverNameBuilder_.addMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public Builder addServerName(
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-        if (serverNameBuilder_ == null) {
-          ensureServerNameIsMutable();
-          serverName_.add(builderForValue.build());
-          onChanged();
-        } else {
-          serverNameBuilder_.addMessage(builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public Builder addServerName(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-        if (serverNameBuilder_ == null) {
-          ensureServerNameIsMutable();
-          serverName_.add(index, builderForValue.build());
-          onChanged();
-        } else {
-          serverNameBuilder_.addMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public Builder addAllServerName(
-          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
-        if (serverNameBuilder_ == null) {
-          ensureServerNameIsMutable();
-          super.addAll(values, serverName_);
-          onChanged();
-        } else {
-          serverNameBuilder_.addAllMessages(values);
-        }
-        return this;
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public Builder clearServerName() {
-        if (serverNameBuilder_ == null) {
-          serverName_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-          onChanged();
-        } else {
-          serverNameBuilder_.clear();
-        }
-        return this;
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public Builder removeServerName(int index) {
-        if (serverNameBuilder_ == null) {
-          ensureServerNameIsMutable();
-          serverName_.remove(index);
-          onChanged();
-        } else {
-          serverNameBuilder_.remove(index);
-        }
-        return this;
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder(
-          int index) {
-        return getServerNameFieldBuilder().getBuilder(index);
-      }
-      /**
-       * repeated .hbase.pb.ServerName server_name = 1;
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
-          int index) {
-        if (serverNameBuilder_ == null) {
-          return serverName_.get(index);  } else {
-          return serverNameBuilder_.getMessageOrBuilder(index);
+            mergeFrom(parsedMessage);
+          }
         }
+        return this;
       }
+      private int bitField0_;
+
+      // required bool prev_snapshot_cleanup = 1;
+      private boolean prevSnapshotCleanup_ ;
       /**
-       * repeated .hbase.pb.ServerName server_name = 1;
+       * required bool prev_snapshot_cleanup = 1;
        */
-      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
-           getServerNameOrBuilderList() {
-        if (serverNameBuilder_ != null) {
-          return serverNameBuilder_.getMessageOrBuilderList();
-        } else {
-          return java.util.Collections.unmodifiableList(serverName_);
-        }
+      public boolean hasPrevSnapshotCleanup() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * repeated .hbase.pb.ServerName server_name = 1;
+       * required bool prev_snapshot_cleanup = 1;
        */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() {
-        return getServerNameFieldBuilder().addBuilder(
-            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+      public boolean getPrevSnapshotCleanup() {
+        return prevSnapshotCleanup_;
       }
       /**
-       * repeated .hbase.pb.ServerName server_name = 1;
+       * required bool prev_snapshot_cleanup = 1;
        */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder(
-          int index) {
-        return getServerNameFieldBuilder().addBuilder(
-            index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+      public Builder setPrevSnapshotCleanup(boolean value) {
+        bitField0_ |= 0x00000001;
+        prevSnapshotCleanup_ = value;
+        onChanged();
+        return this;
       }
       /**
-       * repeated .hbase.pb.ServerName server_name = 1;
+       * required bool prev_snapshot_cleanup = 1;
        */
-      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
-           getServerNameBuilderList() {
-        return getServerNameFieldBuilder().getBuilderList();
-      }
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
-          getServerNameFieldBuilder() {
-        if (serverNameBuilder_ == null) {
-          serverNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
-                  serverName_,
-                  ((bitField0_ & 0x00000001) == 0x00000001),
-                  getParentForChildren(),
-                  isClean());
-          serverName_ = null;
-        }
-        return serverNameBuilder_;
+      public Builder clearPrevSnapshotCleanup() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        prevSnapshotCleanup_ = false;
+        onChanged();
+        return this;
       }

-      // @@protoc_insertion_point(builder_scope:hbase.pb.ClearDeadServersResponse)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.SetSnapshotCleanupResponse)
     }

     static {
-      defaultInstance = new ClearDeadServersResponse(true);
+      defaultInstance = new SetSnapshotCleanupResponse(true);
       defaultInstance.initFields();
     }

-    // @@protoc_insertion_point(class_scope:hbase.pb.ClearDeadServersResponse)
+    // @@protoc_insertion_point(class_scope:hbase.pb.SetSnapshotCleanupResponse)
   }
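Aside for reviewers: the response regenerated above carries a single required bool, prev_snapshot_cleanup, and its isInitialized() rejects a message without it. A short sketch under the same assumptions as the previous one (class and variable names are illustrative, not from this patch):

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;

    public class SetSnapshotCleanupResponseSketch {
      public static void main(String[] args) {
        // The master reports the chore's previous state in the required field.
        SetSnapshotCleanupResponse resp = SetSnapshotCleanupResponse.newBuilder()
            .setPrevSnapshotCleanup(false)  // required bool prev_snapshot_cleanup = 1
            .build();
        // hasPrevSnapshotCleanup() mirrors the isInitialized() check above.
        if (resp.hasPrevSnapshotCleanup()) {
          System.out.println("cleanup was previously on: " + resp.getPrevSnapshotCleanup());
        }
      }
    }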
-  public interface SetSnapshotCleanupRequestOrBuilder
+  public interface IsSnapshotCleanupEnabledRequestOrBuilder
     extends com.google.protobuf.MessageOrBuilder {
-
-    // required bool enabled = 1;
-    /**
-     * required bool enabled = 1;
-     */
-    boolean hasEnabled();
-    /**
-     * required bool enabled = 1;
-     */
-    boolean getEnabled();
-
-    // optional bool synchronous = 2;
-    /**
-     * optional bool synchronous = 2;
-     */
-    boolean hasSynchronous();
-    /**
-     * optional bool synchronous = 2;
-     */
-    boolean getSynchronous();
   }
   /**
-   * Protobuf type {@code hbase.pb.SetSnapshotCleanupRequest}
+   * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledRequest}
    */
-  public static final class SetSnapshotCleanupRequest extends
+  public static final class IsSnapshotCleanupEnabledRequest extends
       com.google.protobuf.GeneratedMessage
-      implements SetSnapshotCleanupRequestOrBuilder {
-    // Use SetSnapshotCleanupRequest.newBuilder() to construct.
-    private SetSnapshotCleanupRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements IsSnapshotCleanupEnabledRequestOrBuilder {
+    // Use IsSnapshotCleanupEnabledRequest.newBuilder() to construct.
+    private IsSnapshotCleanupEnabledRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private SetSnapshotCleanupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private IsSnapshotCleanupEnabledRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

-    private static final SetSnapshotCleanupRequest defaultInstance;
-    public static SetSnapshotCleanupRequest getDefaultInstance() {
+    private static final IsSnapshotCleanupEnabledRequest defaultInstance;
+    public static IsSnapshotCleanupEnabledRequest getDefaultInstance() {
       return defaultInstance;
     }

-    public SetSnapshotCleanupRequest getDefaultInstanceForType() {
+    public IsSnapshotCleanupEnabledRequest getDefaultInstanceForType() {
       return defaultInstance;
     }

@@ -64116,12 +63948,11 @@ public SetSnapshotCleanupRequest getDefaultInstanceForType() {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private SetSnapshotCleanupRequest(
+    private IsSnapshotCleanupEnabledRequest(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       initFields();
-      int mutable_bitField0_ = 0;
       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
           com.google.protobuf.UnknownFieldSet.newBuilder();
       try {
@@ -64139,16 +63970,6 @@ private SetSnapshotCleanupRequest(
             }
             break;
           }
-          case 8: {
-            bitField0_ |= 0x00000001;
-            enabled_ = input.readBool();
-            break;
-          }
-          case 16: {
-            bitField0_ |= 0x00000002;
-            synchronous_ = input.readBool();
-            break;
-          }
         }
       }
     } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -64163,77 +63984,38 @@ private SetSnapshotCleanupRequest(
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor;
     }

     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.Builder.class);
     }

-    public static com.google.protobuf.Parser<SetSnapshotCleanupRequest> PARSER =
-        new com.google.protobuf.AbstractParser<SetSnapshotCleanupRequest>() {
-      public SetSnapshotCleanupRequest parsePartialFrom(
+    public static com.google.protobuf.Parser<IsSnapshotCleanupEnabledRequest> PARSER =
+        new com.google.protobuf.AbstractParser<IsSnapshotCleanupEnabledRequest>() {
+      public IsSnapshotCleanupEnabledRequest parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new SetSnapshotCleanupRequest(input, extensionRegistry);
+        return new IsSnapshotCleanupEnabledRequest(input, extensionRegistry);
       }
     };

     @java.lang.Override
-    public com.google.protobuf.Parser<SetSnapshotCleanupRequest> getParserForType() {
+    public com.google.protobuf.Parser<IsSnapshotCleanupEnabledRequest> getParserForType() {
       return PARSER;
     }

-    private int bitField0_;
-    // required bool enabled = 1;
-    public static final int ENABLED_FIELD_NUMBER = 1;
-    private boolean enabled_;
-    /**
-     * required bool enabled = 1;
-     */
-    public boolean hasEnabled() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    /**
-     * required bool enabled = 1;
-     */
-    public boolean getEnabled() {
-      return enabled_;
-    }
-
-    // optional bool synchronous = 2;
-    public static final int SYNCHRONOUS_FIELD_NUMBER = 2;
-    private boolean synchronous_;
-    /**
-     * optional bool synchronous = 2;
-     */
-    public boolean hasSynchronous() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
-    }
-    /**
-     * optional bool synchronous = 2;
-     */
-    public boolean getSynchronous() {
-      return synchronous_;
-    }
-
     private void initFields() {
-      enabled_ = false;
-      synchronous_ = false;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;

-      if (!hasEnabled()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -64241,12 +64023,6 @@ public final boolean isInitialized() {
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeBool(1, enabled_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeBool(2, synchronous_);
-      }
       getUnknownFields().writeTo(output);
     }
@@ -64256,14 +64032,6 @@ public int getSerializedSize() {
       if (size != -1) return size;

       size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBoolSize(1, enabled_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBoolSize(2, synchronous_);
-      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -64281,22 +64049,12 @@ public boolean equals(final java.lang.Object obj) {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) obj;
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) obj;

       boolean result = true;
-      result = result && (hasEnabled() == other.hasEnabled());
-      if (hasEnabled()) {
-        result = result && (getEnabled()
-            == other.getEnabled());
-      }
-      result = result && (hasSynchronous() == other.hasSynchronous());
-      if (hasSynchronous()) {
-        result = result && (getSynchronous()
-            == other.getSynchronous());
-      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -64310,66 +64068,58 @@ public int hashCode() {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasEnabled()) {
-        hash = (37 * hash) + ENABLED_FIELD_NUMBER;
-        hash = (53 * hash) + hashBoolean(getEnabled());
-      }
-      if (hasSynchronous()) {
-        hash = (37 * hash) + SYNCHRONOUS_FIELD_NUMBER;
-        hash = (53 * hash) + hashBoolean(getSynchronous());
-      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
     }

-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
@@ -64378,7 +64128,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapsho
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -64390,24 +64140,24 @@ protected Builder newBuilderForType(
       return builder;
     }
     /**
-     * Protobuf type {@code hbase.pb.SetSnapshotCleanupRequest}
+     * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledRequest}
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequestOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequestOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor;
       }

       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.Builder.class);
       }

-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -64427,10 +64177,6 @@ private static Builder create() {
       public Builder clear() {
         super.clear();
-        enabled_ = false;
-        bitField0_ = (bitField0_ & ~0x00000001);
-        synchronous_ = false;
-        bitField0_ = (bitField0_ & ~0x00000002);
         return this;
       }
@@ -64440,64 +64186,43 @@ public Builder clone() {
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor;
       }

-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance();
       }

-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }

-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        result.enabled_ = enabled_;
-        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
-          to_bitField0_ |= 0x00000002;
-        }
-        result.synchronous_ = synchronous_;
-        result.bitField0_ = to_bitField0_;
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest(this);
         onBuilt();
         return result;
       }

       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }

-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance()) return this;
-        if (other.hasEnabled()) {
-          setEnabled(other.getEnabled());
-        }
-        if (other.hasSynchronous()) {
-          setSynchronous(other.getSynchronous());
-        }
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance()) return this;
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }

       public final boolean isInitialized() {
-        if (!hasEnabled()) {
-
-          return false;
-        }
         return true;
       }
@@ -64505,11 +64230,11 @@ public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -64518,117 +64243,50 @@ public Builder mergeFrom(
           }
         }
         return this;
       }
-      private int bitField0_;
-
-      // required bool enabled = 1;
-      private boolean enabled_ ;
-      /**
-       * required bool enabled = 1;
-       */
-      public boolean hasEnabled() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * required bool enabled = 1;
-       */
-      public boolean getEnabled() {
-        return enabled_;
-      }
-      /**
-       * required bool enabled = 1;
-       */
-      public Builder setEnabled(boolean value) {
-        bitField0_ |= 0x00000001;
-        enabled_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * required bool enabled = 1;
-       */
-      public Builder clearEnabled() {
-        bitField0_ = (bitField0_ & ~0x00000001);
-        enabled_ = false;
-        onChanged();
-        return this;
-      }
-
-      // optional bool synchronous = 2;
-      private boolean synchronous_ ;
-      /**
-       * optional bool synchronous = 2;
-       */
-      public boolean hasSynchronous() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      /**
-       * optional bool synchronous = 2;
-       */
-      public boolean getSynchronous() {
-        return synchronous_;
-      }
-      /**
-       * optional bool synchronous = 2;
-       */
-      public Builder setSynchronous(boolean value) {
-        bitField0_ |= 0x00000002;
-        synchronous_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * optional bool synchronous = 2;
-       */
-      public Builder clearSynchronous() {
-        bitField0_ = (bitField0_ & ~0x00000002);
-        synchronous_ = false;
-        onChanged();
-        return this;
-      }

-      // @@protoc_insertion_point(builder_scope:hbase.pb.SetSnapshotCleanupRequest)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.IsSnapshotCleanupEnabledRequest)
     }

     static {
-      defaultInstance = new SetSnapshotCleanupRequest(true);
+      defaultInstance = new IsSnapshotCleanupEnabledRequest(true);
       defaultInstance.initFields();
     }

-    // @@protoc_insertion_point(class_scope:hbase.pb.SetSnapshotCleanupRequest)
+    // @@protoc_insertion_point(class_scope:hbase.pb.IsSnapshotCleanupEnabledRequest)
   }
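Aside for reviewers: IsSnapshotCleanupEnabledRequest deliberately carries no fields, so callers can reuse the shared default instance instead of allocating a builder. A one-class sketch (illustrative names only, not code from this patch):

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;

    public class IsSnapshotCleanupEnabledRequestSketch {
      public static void main(String[] args) {
        // Fieldless message: always initialized and serializes to zero bytes.
        IsSnapshotCleanupEnabledRequest req = IsSnapshotCleanupEnabledRequest.getDefaultInstance();
        System.out.println(req.isInitialized() + " " + req.getSerializedSize());
      }
    }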
SetSnapshotCleanupResponseOrBuilder { - // Use SetSnapshotCleanupResponse.newBuilder() to construct. - private SetSnapshotCleanupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsSnapshotCleanupEnabledResponseOrBuilder { + // Use IsSnapshotCleanupEnabledResponse.newBuilder() to construct. + private IsSnapshotCleanupEnabledResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SetSnapshotCleanupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsSnapshotCleanupEnabledResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SetSnapshotCleanupResponse defaultInstance; - public static SetSnapshotCleanupResponse getDefaultInstance() { + private static final IsSnapshotCleanupEnabledResponse defaultInstance; + public static IsSnapshotCleanupEnabledResponse getDefaultInstance() { return defaultInstance; } - public SetSnapshotCleanupResponse getDefaultInstanceForType() { + public IsSnapshotCleanupEnabledResponse getDefaultInstanceForType() { return defaultInstance; } @@ -64638,7 +64296,7 @@ public SetSnapshotCleanupResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private SetSnapshotCleanupResponse( + private IsSnapshotCleanupEnabledResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -64663,7 +64321,7 @@ private SetSnapshotCleanupResponse( } case 8: { bitField0_ |= 0x00000001; - prevSnapshotCleanup_ = input.readBool(); + enabled_ = input.readBool(); break; } } @@ -64680,57 +64338,57 @@ private SetSnapshotCleanupResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SetSnapshotCleanupResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsSnapshotCleanupEnabledResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SetSnapshotCleanupResponse(input, 
extensionRegistry); + return new IsSnapshotCleanupEnabledResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required bool prev_snapshot_cleanup = 1; - public static final int PREV_SNAPSHOT_CLEANUP_FIELD_NUMBER = 1; - private boolean prevSnapshotCleanup_; + // required bool enabled = 1; + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_; /** - * required bool prev_snapshot_cleanup = 1; + * required bool enabled = 1; */ - public boolean hasPrevSnapshotCleanup() { + public boolean hasEnabled() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool prev_snapshot_cleanup = 1; + * required bool enabled = 1; */ - public boolean getPrevSnapshotCleanup() { - return prevSnapshotCleanup_; + public boolean getEnabled() { + return enabled_; } private void initFields() { - prevSnapshotCleanup_ = false; + enabled_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasPrevSnapshotCleanup()) { + if (!hasEnabled()) { memoizedIsInitialized = 0; return false; } @@ -64742,7 +64400,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, prevSnapshotCleanup_); + output.writeBool(1, enabled_); } getUnknownFields().writeTo(output); } @@ -64755,7 +64413,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, prevSnapshotCleanup_); + .computeBoolSize(1, enabled_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -64774,16 +64432,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) obj; boolean result = true; - result = result && (hasPrevSnapshotCleanup() == other.hasPrevSnapshotCleanup()); - if (hasPrevSnapshotCleanup()) { - result = result && (getPrevSnapshotCleanup() - == other.getPrevSnapshotCleanup()); + result = result && (hasEnabled() == other.hasEnabled()); + if (hasEnabled()) { + result = result && (getEnabled() + == other.getEnabled()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -64798,62 +64456,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasPrevSnapshotCleanup()) { - hash = (37 * hash) + PREV_SNAPSHOT_CLEANUP_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getPrevSnapshotCleanup()); + if (hasEnabled()) { + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getEnabled()); } hash = (29 * hash) + 
getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -64862,7 +64520,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapsho public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -64874,24 +64532,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.SetSnapshotCleanupResponse} + * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -64911,7 +64569,7 @@ private static Builder create() { public Builder clear() { super.clear(); - prevSnapshotCleanup_ = false; + enabled_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -64922,54 +64580,54 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; } - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.prevSnapshotCleanup_ = prevSnapshotCleanup_; + result.enabled_ = enabled_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance()) return this; - if (other.hasPrevSnapshotCleanup()) { - setPrevSnapshotCleanup(other.getPrevSnapshotCleanup()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance()) return this; + if (other.hasEnabled()) { + setEnabled(other.getEnabled()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasPrevSnapshotCleanup()) { + if (!hasEnabled()) { return false; } @@ -64980,11 +64638,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -64995,72 +64653,91 @@ public Builder mergeFrom( } private int bitField0_; - // required bool prev_snapshot_cleanup = 1; - private boolean prevSnapshotCleanup_ ; + // required bool enabled = 1; + private boolean enabled_ ; /** - * required bool prev_snapshot_cleanup = 1; + * required bool enabled = 1; */ - public boolean hasPrevSnapshotCleanup() { + public boolean hasEnabled() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool prev_snapshot_cleanup = 1; + * required bool enabled = 1; */ - public boolean getPrevSnapshotCleanup() { - return prevSnapshotCleanup_; + public boolean getEnabled() { + return enabled_; } /** - * required bool prev_snapshot_cleanup = 1; + * required bool enabled = 1; */ - public Builder setPrevSnapshotCleanup(boolean value) { + public Builder setEnabled(boolean value) { bitField0_ |= 0x00000001; - prevSnapshotCleanup_ = value; + enabled_ = value; onChanged(); return this; } /** - * required bool prev_snapshot_cleanup = 1; + * required bool enabled = 1; */ - public Builder clearPrevSnapshotCleanup() { + public Builder clearEnabled() { bitField0_ = (bitField0_ & ~0x00000001); - prevSnapshotCleanup_ = false; + enabled_ = false; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.SetSnapshotCleanupResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsSnapshotCleanupEnabledResponse) } static { - defaultInstance = new SetSnapshotCleanupResponse(true); + defaultInstance = new IsSnapshotCleanupEnabledResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.SetSnapshotCleanupResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.IsSnapshotCleanupEnabledResponse) } - public interface IsSnapshotCleanupEnabledRequestOrBuilder + public interface BalancerDecisionsRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // optional uint32 limit = 1; + /** + * optional uint32 limit = 1; + */ + boolean hasLimit(); + /** + * optional uint32 limit = 1; + */ + int getLimit(); } /** - * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledRequest} + * Protobuf type {@code hbase.pb.BalancerDecisionsRequest} + * + *
+   * &lt;pre&gt;
+   **
+   * BalancerDecision (LogRequest) use-case specific RPC request. This request payload will be
+   * converted in bytes and sent to generic RPC API: GetLogEntries
+   * LogRequest message has two params:
+   * 1. log_class_name: BalancerDecisionsRequest (for BalancerDecision use-case)
+   * 2. log_message: BalancerDecisionsRequest converted in bytes (for BalancerDecision use-case)
+   * &lt;/pre&gt;
*/ - public static final class IsSnapshotCleanupEnabledRequest extends + public static final class BalancerDecisionsRequest extends com.google.protobuf.GeneratedMessage - implements IsSnapshotCleanupEnabledRequestOrBuilder { - // Use IsSnapshotCleanupEnabledRequest.newBuilder() to construct. - private IsSnapshotCleanupEnabledRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements BalancerDecisionsRequestOrBuilder { + // Use BalancerDecisionsRequest.newBuilder() to construct. + private BalancerDecisionsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private IsSnapshotCleanupEnabledRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private BalancerDecisionsRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final IsSnapshotCleanupEnabledRequest defaultInstance; - public static IsSnapshotCleanupEnabledRequest getDefaultInstance() { + private static final BalancerDecisionsRequest defaultInstance; + public static BalancerDecisionsRequest getDefaultInstance() { return defaultInstance; } - public IsSnapshotCleanupEnabledRequest getDefaultInstanceForType() { + public BalancerDecisionsRequest getDefaultInstanceForType() { return defaultInstance; } @@ -65070,11 +64747,12 @@ public IsSnapshotCleanupEnabledRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private IsSnapshotCleanupEnabledRequest( + private BalancerDecisionsRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -65092,6 +64770,11 @@ private IsSnapshotCleanupEnabledRequest( } break; } + case 8: { + bitField0_ |= 0x00000001; + limit_ = input.readUInt32(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -65106,32 +64789,50 @@ private IsSnapshotCleanupEnabledRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public IsSnapshotCleanupEnabledRequest parsePartialFrom( + public 
static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalancerDecisionsRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsSnapshotCleanupEnabledRequest(input, extensionRegistry); + return new BalancerDecisionsRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } + private int bitField0_; + // optional uint32 limit = 1; + public static final int LIMIT_FIELD_NUMBER = 1; + private int limit_; + /** + * optional uint32 limit = 1; + */ + public boolean hasLimit() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint32 limit = 1; + */ + public int getLimit() { + return limit_; + } + private void initFields() { + limit_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -65145,6 +64846,9 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, limit_); + } getUnknownFields().writeTo(output); } @@ -65154,6 +64858,10 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, limit_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -65171,12 +64879,17 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest) obj; boolean result = true; + result = result && (hasLimit() == other.hasLimit()); + if (hasLimit()) { + result = result && (getLimit() + == other.getLimit()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -65190,58 +64903,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasLimit()) { + hash = (37 * hash) + LIMIT_FIELD_NUMBER; + hash = (53 * hash) + getLimit(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -65250,7 +64967,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshot public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -65262,24 +64979,33 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledRequest} + * Protobuf type {@code hbase.pb.BalancerDecisionsRequest} + * + *
+     * &lt;pre&gt;
+     **
+     * BalancerDecision (LogRequest) use-case specific RPC request. This request payload will be
+     * converted in bytes and sent to generic RPC API: GetLogEntries
+     * LogRequest message has two params:
+     * 1. log_class_name: BalancerDecisionsRequest (for BalancerDecision use-case)
+     * 2. log_message: BalancerDecisionsRequest converted in bytes (for BalancerDecision use-case)
+     * &lt;/pre&gt;
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -65299,6 +65025,8 @@ private static Builder create() { public Builder clear() { super.clear(); + limit_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -65308,38 +65036,48 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest result = new 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.limit_ = limit_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.getDefaultInstance()) return this; + if (other.hasLimit()) { + setLimit(other.getLimit()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -65352,11 +65090,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -65365,50 +65103,108 @@ public Builder mergeFrom( } return this; } + private int bitField0_; + + // optional uint32 limit = 1; + private int limit_ ; + /** + * optional uint32 limit = 1; + */ + public boolean hasLimit() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint32 limit = 1; + */ + public int getLimit() { + return limit_; + } + /** + * optional uint32 limit = 1; + */ + public Builder setLimit(int value) { + bitField0_ |= 0x00000001; + limit_ = value; + onChanged(); + return this; + } + /** + * optional uint32 limit = 1; + */ + public Builder clearLimit() { + bitField0_ = (bitField0_ & ~0x00000001); + limit_ = 0; + onChanged(); + return this; + } - // @@protoc_insertion_point(builder_scope:hbase.pb.IsSnapshotCleanupEnabledRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.BalancerDecisionsRequest) } static { - 
defaultInstance = new IsSnapshotCleanupEnabledRequest(true); + defaultInstance = new BalancerDecisionsRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.IsSnapshotCleanupEnabledRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.BalancerDecisionsRequest) } - public interface IsSnapshotCleanupEnabledResponseOrBuilder + public interface BalancerDecisionsResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bool enabled = 1; + // repeated .hbase.pb.BalancerDecision balancer_decision = 1; /** - * required bool enabled = 1; + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; */ - boolean hasEnabled(); + java.util.List + getBalancerDecisionList(); /** - * required bool enabled = 1; + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; */ - boolean getEnabled(); + org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision getBalancerDecision(int index); + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + int getBalancerDecisionCount(); + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + java.util.List + getBalancerDecisionOrBuilderList(); + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder getBalancerDecisionOrBuilder( + int index); } /** - * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledResponse} + * Protobuf type {@code hbase.pb.BalancerDecisionsResponse} + * + *
+   * &lt;pre&gt;
+   **
+   * BalancerDecision (LogEntry) use-case specific RPC response. This response payload will be
+   * converted in bytes by servers and sent as response to generic RPC API: GetLogEntries
+   * LogEntry message has two params:
+   * 1. log_class_name: BalancerDecisionsResponse (for BalancerDecision use-case)
+   * 2. log_message: BalancerDecisionsResponse converted in bytes (for BalancerDecision use-case)
+   * &lt;/pre&gt;
*/ - public static final class IsSnapshotCleanupEnabledResponse extends + public static final class BalancerDecisionsResponse extends com.google.protobuf.GeneratedMessage - implements IsSnapshotCleanupEnabledResponseOrBuilder { - // Use IsSnapshotCleanupEnabledResponse.newBuilder() to construct. - private IsSnapshotCleanupEnabledResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements BalancerDecisionsResponseOrBuilder { + // Use BalancerDecisionsResponse.newBuilder() to construct. + private BalancerDecisionsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private IsSnapshotCleanupEnabledResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private BalancerDecisionsResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final IsSnapshotCleanupEnabledResponse defaultInstance; - public static IsSnapshotCleanupEnabledResponse getDefaultInstance() { + private static final BalancerDecisionsResponse defaultInstance; + public static BalancerDecisionsResponse getDefaultInstance() { return defaultInstance; } - public IsSnapshotCleanupEnabledResponse getDefaultInstanceForType() { + public BalancerDecisionsResponse getDefaultInstanceForType() { return defaultInstance; } @@ -65418,7 +65214,7 @@ public IsSnapshotCleanupEnabledResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private IsSnapshotCleanupEnabledResponse( + private BalancerDecisionsResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -65441,9 +65237,12 @@ private IsSnapshotCleanupEnabledResponse( } break; } - case 8: { - bitField0_ |= 0x00000001; - enabled_ = input.readBool(); + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + balancerDecision_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + balancerDecision_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.PARSER, extensionRegistry)); break; } } @@ -65454,65 +65253,89 @@ private IsSnapshotCleanupEnabledResponse( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + balancerDecision_ = java.util.Collections.unmodifiableList(balancerDecision_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public IsSnapshotCleanupEnabledResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalancerDecisionsResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new IsSnapshotCleanupEnabledResponse(input, extensionRegistry); + return new BalancerDecisionsResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required bool enabled = 1; - public static final int ENABLED_FIELD_NUMBER = 1; - private boolean enabled_; + // repeated .hbase.pb.BalancerDecision balancer_decision = 1; + public static final int BALANCER_DECISION_FIELD_NUMBER = 1; + private java.util.List balancerDecision_; /** - * required bool enabled = 1; + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; */ - public boolean hasEnabled() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getBalancerDecisionList() { + return balancerDecision_; } /** - * required bool enabled = 1; + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; */ - public boolean getEnabled() { - return enabled_; + public java.util.List + getBalancerDecisionOrBuilderList() { + return balancerDecision_; + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public int getBalancerDecisionCount() { + return balancerDecision_.size(); + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision getBalancerDecision(int index) { + return balancerDecision_.get(index); + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder getBalancerDecisionOrBuilder( + int index) { + return balancerDecision_.get(index); } private void initFields() { - enabled_ = false; + balancerDecision_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasEnabled()) { - memoizedIsInitialized = 0; - return false; + for (int i = 0; i < getBalancerDecisionCount(); i++) { + if (!getBalancerDecision(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -65521,8 +65344,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, enabled_); + for (int i = 0; i < balancerDecision_.size(); i++) { + output.writeMessage(1, balancerDecision_.get(i)); } getUnknownFields().writeTo(output); } @@ -65533,9 +65356,9 @@ public int getSerializedSize() { if (size != -1) 
return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + for (int i = 0; i < balancerDecision_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, enabled_); + .computeMessageSize(1, balancerDecision_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -65554,17 +65377,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse) obj; boolean result = true; - result = result && (hasEnabled() == other.hasEnabled()); - if (hasEnabled()) { - result = result && (getEnabled() - == other.getEnabled()); - } + result = result && getBalancerDecisionList() + .equals(other.getBalancerDecisionList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -65578,62 +65398,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasEnabled()) { - hash = (37 * hash) + ENABLED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getEnabled()); + if (getBalancerDecisionCount() > 0) { + hash = (37 * hash) + BALANCER_DECISION_FIELD_NUMBER; + hash = (53 * hash) + getBalancerDecisionList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -65642,7 +65462,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshot public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -65654,24 +65474,33 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.IsSnapshotCleanupEnabledResponse} + * Protobuf type {@code hbase.pb.BalancerDecisionsResponse} + * + *
+     * &lt;pre&gt;
+     **
+     * BalancerDecision (LogEntry) use-case specific RPC response. This response payload will be
+     * converted in bytes by servers and sent as response to generic RPC API: GetLogEntries
+     * LogEntry message has two params:
+     * 1. log_class_name: BalancerDecisionsResponse (for BalancerDecision use-case)
+     * 2. log_message: BalancerDecisionsResponse converted in bytes (for BalancerDecision use-case)
+     * &lt;/pre&gt;
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -65683,6 +65512,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBalancerDecisionFieldBuilder(); } } private static Builder create() { @@ -65691,8 +65521,12 @@ private static Builder create() { public Builder clear() { super.clear(); - enabled_ = false; - bitField0_ = (bitField0_ & ~0x00000001); + if (balancerDecisionBuilder_ == null) { + balancerDecision_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + balancerDecisionBuilder_.clear(); + } return this; } @@ -65702,56 +65536,84 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse 
build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + if (balancerDecisionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + balancerDecision_ = java.util.Collections.unmodifiableList(balancerDecision_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.balancerDecision_ = balancerDecision_; + } else { + result.balancerDecision_ = balancerDecisionBuilder_.build(); } - result.enabled_ = enabled_; - result.bitField0_ = to_bitField0_; onBuilt(); return result; } - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse)other); + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance()) return this; - if (other.hasEnabled()) { - setEnabled(other.getEnabled()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.getDefaultInstance()) return this; + if (balancerDecisionBuilder_ == null) { + if (!other.balancerDecision_.isEmpty()) { + if (balancerDecision_.isEmpty()) { + balancerDecision_ = other.balancerDecision_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureBalancerDecisionIsMutable(); + balancerDecision_.addAll(other.balancerDecision_); + } + onChanged(); + } + } else { + if (!other.balancerDecision_.isEmpty()) { + if (balancerDecisionBuilder_.isEmpty()) { + balancerDecisionBuilder_.dispose(); + balancerDecisionBuilder_ = null; + balancerDecision_ = other.balancerDecision_; + bitField0_ = (bitField0_ & ~0x00000001); + balancerDecisionBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getBalancerDecisionFieldBuilder() : null; + } else { + balancerDecisionBuilder_.addAllMessages(other.balancerDecision_); + } + } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasEnabled()) { - - return false; + for (int i = 0; i < getBalancerDecisionCount(); i++) { + if (!getBalancerDecision(i).isInitialized()) { + + return false; + } } return true; } @@ -65760,11 +65622,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -65775,91 +65637,283 @@ public Builder mergeFrom( } private int bitField0_; - // required bool enabled = 1; - private boolean enabled_ ; + // repeated .hbase.pb.BalancerDecision balancer_decision = 1; + private java.util.List balancerDecision_ = + java.util.Collections.emptyList(); + private void ensureBalancerDecisionIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + balancerDecision_ = new java.util.ArrayList(balancerDecision_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder> balancerDecisionBuilder_; + /** - * required bool enabled = 1; + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; */ - public boolean hasEnabled() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getBalancerDecisionList() { + if (balancerDecisionBuilder_ == null) { + return java.util.Collections.unmodifiableList(balancerDecision_); + } else { + return balancerDecisionBuilder_.getMessageList(); + } } /** - * required bool enabled = 1; + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; */ - public boolean getEnabled() { - return enabled_; + public int getBalancerDecisionCount() { + if (balancerDecisionBuilder_ == null) { + return balancerDecision_.size(); + } else { + return balancerDecisionBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision getBalancerDecision(int index) { + if (balancerDecisionBuilder_ == null) { + return balancerDecision_.get(index); + } else { + return balancerDecisionBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public Builder setBalancerDecision( + int index, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision value) { + if (balancerDecisionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBalancerDecisionIsMutable(); + 
balancerDecision_.set(index, value); + onChanged(); + } else { + balancerDecisionBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public Builder setBalancerDecision( + int index, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder builderForValue) { + if (balancerDecisionBuilder_ == null) { + ensureBalancerDecisionIsMutable(); + balancerDecision_.set(index, builderForValue.build()); + onChanged(); + } else { + balancerDecisionBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public Builder addBalancerDecision(org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision value) { + if (balancerDecisionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBalancerDecisionIsMutable(); + balancerDecision_.add(value); + onChanged(); + } else { + balancerDecisionBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public Builder addBalancerDecision( + int index, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision value) { + if (balancerDecisionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBalancerDecisionIsMutable(); + balancerDecision_.add(index, value); + onChanged(); + } else { + balancerDecisionBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public Builder addBalancerDecision( + org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder builderForValue) { + if (balancerDecisionBuilder_ == null) { + ensureBalancerDecisionIsMutable(); + balancerDecision_.add(builderForValue.build()); + onChanged(); + } else { + balancerDecisionBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public Builder addBalancerDecision( + int index, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder builderForValue) { + if (balancerDecisionBuilder_ == null) { + ensureBalancerDecisionIsMutable(); + balancerDecision_.add(index, builderForValue.build()); + onChanged(); + } else { + balancerDecisionBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public Builder addAllBalancerDecision( + java.lang.Iterable values) { + if (balancerDecisionBuilder_ == null) { + ensureBalancerDecisionIsMutable(); + super.addAll(values, balancerDecision_); + onChanged(); + } else { + balancerDecisionBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public Builder clearBalancerDecision() { + if (balancerDecisionBuilder_ == null) { + balancerDecision_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + balancerDecisionBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public Builder removeBalancerDecision(int index) { + if (balancerDecisionBuilder_ == null) { + ensureBalancerDecisionIsMutable(); + balancerDecision_.remove(index); + onChanged(); + } else { + balancerDecisionBuilder_.remove(index); + } + return this; + } + /** + * 
repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder getBalancerDecisionBuilder( + int index) { + return getBalancerDecisionFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder getBalancerDecisionOrBuilder( + int index) { + if (balancerDecisionBuilder_ == null) { + return balancerDecision_.get(index); } else { + return balancerDecisionBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public java.util.List + getBalancerDecisionOrBuilderList() { + if (balancerDecisionBuilder_ != null) { + return balancerDecisionBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(balancerDecision_); + } + } + /** + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder addBalancerDecisionBuilder() { + return getBalancerDecisionFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.getDefaultInstance()); } /** - * required bool enabled = 1; + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; */ - public Builder setEnabled(boolean value) { - bitField0_ |= 0x00000001; - enabled_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder addBalancerDecisionBuilder( + int index) { + return getBalancerDecisionFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.getDefaultInstance()); } /** - * required bool enabled = 1; + * repeated .hbase.pb.BalancerDecision balancer_decision = 1; */ - public Builder clearEnabled() { - bitField0_ = (bitField0_ & ~0x00000001); - enabled_ = false; - onChanged(); - return this; + public java.util.List + getBalancerDecisionBuilderList() { + return getBalancerDecisionFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder> + getBalancerDecisionFieldBuilder() { + if (balancerDecisionBuilder_ == null) { + balancerDecisionBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder>( + balancerDecision_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + balancerDecision_ = null; + } + return balancerDecisionBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.IsSnapshotCleanupEnabledResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.BalancerDecisionsResponse) } static { - defaultInstance = new IsSnapshotCleanupEnabledResponse(true); + defaultInstance = new BalancerDecisionsResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.IsSnapshotCleanupEnabledResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.BalancerDecisionsResponse) } - public interface 
BalancerDecisionsRequestOrBuilder + public interface GetClusterIdRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - - // optional uint32 limit = 1; - /** - * optional uint32 limit = 1; - */ - boolean hasLimit(); - /** - * optional uint32 limit = 1; - */ - int getLimit(); } /** - * Protobuf type {@code hbase.pb.BalancerDecisionsRequest} + * Protobuf type {@code hbase.pb.GetClusterIdRequest} * *
-   **
-   * BalancerDecision (LogRequest) use-case specific RPC request. This request payload will be
-   * converted in bytes and sent to generic RPC API: GetLogEntries
-   * LogRequest message has two params:
-   * 1. log_class_name: BalancerDecisionsRequest (for BalancerDecision use-case)
-   * 2. log_message: BalancerDecisionsRequest converted in bytes (for BalancerDecision use-case)
+   ** Request and response to get the clusterID for this cluster 
    * </pre>
*/ - public static final class BalancerDecisionsRequest extends + public static final class GetClusterIdRequest extends com.google.protobuf.GeneratedMessage - implements BalancerDecisionsRequestOrBuilder { - // Use BalancerDecisionsRequest.newBuilder() to construct. - private BalancerDecisionsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetClusterIdRequestOrBuilder { + // Use GetClusterIdRequest.newBuilder() to construct. + private GetClusterIdRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private BalancerDecisionsRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetClusterIdRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final BalancerDecisionsRequest defaultInstance; - public static BalancerDecisionsRequest getDefaultInstance() { + private static final GetClusterIdRequest defaultInstance; + public static GetClusterIdRequest getDefaultInstance() { return defaultInstance; } - public BalancerDecisionsRequest getDefaultInstanceForType() { + public GetClusterIdRequest getDefaultInstanceForType() { return defaultInstance; } @@ -65869,12 +65923,11 @@ public BalancerDecisionsRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private BalancerDecisionsRequest( + private GetClusterIdRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -65892,11 +65945,6 @@ private BalancerDecisionsRequest( } break; } - case 8: { - bitField0_ |= 0x00000001; - limit_ = input.readUInt32(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -65911,50 +65959,32 @@ private BalancerDecisionsRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BalancerDecisionsRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetClusterIdRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new BalancerDecisionsRequest(input, extensionRegistry); + return new GetClusterIdRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // optional uint32 limit = 1; - public static final int LIMIT_FIELD_NUMBER = 1; - private int limit_; - /** - * optional uint32 limit = 1; - */ - public boolean hasLimit() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional uint32 limit = 1; - */ - public int getLimit() { - return limit_; - } - private void initFields() { - limit_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -65968,9 +65998,6 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt32(1, limit_); - } getUnknownFields().writeTo(output); } @@ -65980,10 +66007,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(1, limit_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -66001,17 +66024,12 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) obj; boolean result = true; - result = result && (hasLimit() == other.hasLimit()); - if (hasLimit()) { - result = result && (getLimit() - == other.getLimit()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -66025,62 +66043,58 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasLimit()) { - hash = (37 * hash) + LIMIT_FIELD_NUMBER; - hash = (53 * hash) + getLimit(); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -66089,7 +66103,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDe public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -66101,33 +66115,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code 
hbase.pb.BalancerDecisionsRequest} + * Protobuf type {@code hbase.pb.GetClusterIdRequest} * *
-     **
-     * BalancerDecision (LogRequest) use-case specific RPC request. This request payload will be
-     * converted in bytes and sent to generic RPC API: GetLogEntries
-     * LogRequest message has two params:
-     * 1. log_class_name: BalancerDecisionsRequest (for BalancerDecision use-case)
-     * 2. log_message: BalancerDecisionsRequest converted in bytes (for BalancerDecision use-case)
+     ** Request and response to get the clusterID for this cluster 
      * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -66147,8 +66156,6 @@ private static Builder create() { public Builder clear() { super.clear(); - limit_ = 0; - bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -66158,48 +66165,38 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ 
& 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.limit_ = limit_; - result.bitField0_ = to_bitField0_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest.getDefaultInstance()) return this; - if (other.hasLimit()) { - setLimit(other.getLimit()); - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -66212,11 +66209,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -66225,108 +66222,67 @@ public Builder mergeFrom( } return this; } - private int bitField0_; - - // optional uint32 limit = 1; - private int limit_ ; - /** - * optional uint32 limit = 1; - */ - public boolean hasLimit() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional uint32 limit = 1; - */ - public int getLimit() { - return limit_; - } - /** - * optional uint32 limit = 1; - */ - public Builder setLimit(int value) { - bitField0_ |= 0x00000001; - limit_ = value; - onChanged(); - return this; - } - /** - * optional uint32 limit = 1; - */ - public Builder clearLimit() { - bitField0_ = (bitField0_ & ~0x00000001); - limit_ = 0; - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:hbase.pb.BalancerDecisionsRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterIdRequest) } static { - defaultInstance = new BalancerDecisionsRequest(true); + defaultInstance = new GetClusterIdRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BalancerDecisionsRequest) + // 
@@protoc_insertion_point(class_scope:hbase.pb.GetClusterIdRequest) } - public interface BalancerDecisionsResponseOrBuilder + public interface GetClusterIdResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .hbase.pb.BalancerDecision balancer_decision = 1; - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - java.util.List - getBalancerDecisionList(); - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision getBalancerDecision(int index); + // optional string cluster_id = 1; /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
*/ - int getBalancerDecisionCount(); + boolean hasClusterId(); /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
*/ - java.util.List - getBalancerDecisionOrBuilderList(); + java.lang.String getClusterId(); /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
*/ - org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder getBalancerDecisionOrBuilder( - int index); + com.google.protobuf.ByteString + getClusterIdBytes(); } /** - * Protobuf type {@code hbase.pb.BalancerDecisionsResponse} - * - *
-   **
-   * BalancerDecision (LogEntry) use-case specific RPC response. This response payload will be
-   * converted in bytes by servers and sent as response to generic RPC API: GetLogEntries
-   * LogEntry message has two params:
-   * 1. log_class_name: BalancerDecisionsResponse (for BalancerDecision use-case)
-   * 2. log_message: BalancerDecisionsResponse converted in bytes (for BalancerDecision use-case)
-   * </pre>
+ * Protobuf type {@code hbase.pb.GetClusterIdResponse} */ - public static final class BalancerDecisionsResponse extends + public static final class GetClusterIdResponse extends com.google.protobuf.GeneratedMessage - implements BalancerDecisionsResponseOrBuilder { - // Use BalancerDecisionsResponse.newBuilder() to construct. - private BalancerDecisionsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetClusterIdResponseOrBuilder { + // Use GetClusterIdResponse.newBuilder() to construct. + private GetClusterIdResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private BalancerDecisionsResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetClusterIdResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final BalancerDecisionsResponse defaultInstance; - public static BalancerDecisionsResponse getDefaultInstance() { + private static final GetClusterIdResponse defaultInstance; + public static GetClusterIdResponse getDefaultInstance() { return defaultInstance; } - public BalancerDecisionsResponse getDefaultInstanceForType() { + public GetClusterIdResponse getDefaultInstanceForType() { return defaultInstance; } @@ -66336,7 +66292,7 @@ public BalancerDecisionsResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private BalancerDecisionsResponse( + private GetClusterIdResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -66360,11 +66316,8 @@ private BalancerDecisionsResponse( break; } case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - balancerDecision_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - balancerDecision_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.PARSER, extensionRegistry)); + bitField0_ |= 0x00000001; + clusterId_ = input.readBytes(); break; } } @@ -66375,90 +66328,101 @@ private BalancerDecisionsResponse( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - balancerDecision_ = java.util.Collections.unmodifiableList(balancerDecision_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.Builder.class); + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BalancerDecisionsResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetClusterIdResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new BalancerDecisionsResponse(input, extensionRegistry); + return new GetClusterIdResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - // repeated .hbase.pb.BalancerDecision balancer_decision = 1; - public static final int BALANCER_DECISION_FIELD_NUMBER = 1; - private java.util.List balancerDecision_; - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public java.util.List getBalancerDecisionList() { - return balancerDecision_; - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public java.util.List - getBalancerDecisionOrBuilderList() { - return balancerDecision_; - } + private int bitField0_; + // optional string cluster_id = 1; + public static final int CLUSTER_ID_FIELD_NUMBER = 1; + private java.lang.Object clusterId_; /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
*/ - public int getBalancerDecisionCount() { - return balancerDecision_.size(); + public boolean hasClusterId() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision getBalancerDecision(int index) { - return balancerDecision_.get(index); + public java.lang.String getClusterId() { + java.lang.Object ref = clusterId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + clusterId_ = s; + } + return s; + } } /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder getBalancerDecisionOrBuilder( - int index) { - return balancerDecision_.get(index); + public com.google.protobuf.ByteString + getClusterIdBytes() { + java.lang.Object ref = clusterId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } private void initFields() { - balancerDecision_ = java.util.Collections.emptyList(); + clusterId_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getBalancerDecisionCount(); i++) { - if (!getBalancerDecision(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } memoizedIsInitialized = 1; return true; } @@ -66466,8 +66430,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < balancerDecision_.size(); i++) { - output.writeMessage(1, balancerDecision_.get(i)); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getClusterIdBytes()); } getUnknownFields().writeTo(output); } @@ -66478,9 +66442,9 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - for (int i = 0; i < balancerDecision_.size(); i++) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, balancerDecision_.get(i)); + .computeBytesSize(1, getClusterIdBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -66499,14 +66463,17 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) obj; boolean result = true; - result = result && getBalancerDecisionList() - .equals(other.getBalancerDecisionList()); + result = result && (hasClusterId() == other.hasClusterId()); + if (hasClusterId()) { + result = result && getClusterId() + .equals(other.getClusterId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -66520,62 +66487,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getBalancerDecisionCount() > 0) { - hash = (37 * hash) + BALANCER_DECISION_FIELD_NUMBER; - hash = (53 * hash) + getBalancerDecisionList().hashCode(); + if (hasClusterId()) { + hash = (37 * hash) + CLUSTER_ID_FIELD_NUMBER; + hash = (53 * hash) + getClusterId().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -66584,7 +66551,7 @@ public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDe public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -66596,33 +66563,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.BalancerDecisionsResponse} - * - *
-     **
-     * BalancerDecision (LogEntry) use-case specific RPC response. This response payload will be
-     * converted in bytes by servers and sent as response to generic RPC API: GetLogEntries
-     * LogEntry message has two params:
-     * 1. log_class_name: BalancerDecisionsResponse (for BalancerDecision use-case)
-     * 2. log_message: BalancerDecisionsResponse converted in bytes (for BalancerDecision use-case)
-     * </pre>
+ * Protobuf type {@code hbase.pb.GetClusterIdResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -66634,7 +66592,6 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBalancerDecisionFieldBuilder(); } } private static Builder create() { @@ -66642,13 +66599,9 @@ private static Builder create() { } public Builder clear() { - super.clear(); - if (balancerDecisionBuilder_ == null) { - balancerDecision_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - balancerDecisionBuilder_.clear(); - } + super.clear(); + clusterId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -66658,85 +66611,55 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalancerDecisionsResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse build() { + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse(this); int from_bitField0_ = bitField0_; - if (balancerDecisionBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - balancerDecision_ = java.util.Collections.unmodifiableList(balancerDecision_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.balancerDecision_ = balancerDecision_; - } else { - result.balancerDecision_ = balancerDecisionBuilder_.build(); + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; } + result.clusterId_ = clusterId_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse.getDefaultInstance()) return this; - if (balancerDecisionBuilder_ == null) { - if (!other.balancerDecision_.isEmpty()) { - if (balancerDecision_.isEmpty()) { - balancerDecision_ = other.balancerDecision_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureBalancerDecisionIsMutable(); - balancerDecision_.addAll(other.balancerDecision_); - } - onChanged(); - } - } else { - if (!other.balancerDecision_.isEmpty()) { - if (balancerDecisionBuilder_.isEmpty()) { - balancerDecisionBuilder_.dispose(); - balancerDecisionBuilder_ = null; - balancerDecision_ = other.balancerDecision_; - bitField0_ = (bitField0_ & ~0x00000001); - balancerDecisionBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getBalancerDecisionFieldBuilder() : null; - } else { - balancerDecisionBuilder_.addAllMessages(other.balancerDecision_); - } - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance()) return this; + if (other.hasClusterId()) { + bitField0_ |= 0x00000001; + clusterId_ = other.clusterId_; + onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - for (int i = 0; i < getBalancerDecisionCount(); i++) { - if (!getBalancerDecision(i).isInitialized()) { - - return false; - } - } return true; } @@ -66744,11 +66667,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalancerDecisionsResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -66759,283 +66682,141 @@ public Builder mergeFrom( } private int bitField0_; - // repeated .hbase.pb.BalancerDecision balancer_decision = 1; - private java.util.List balancerDecision_ = - java.util.Collections.emptyList(); - private void ensureBalancerDecisionIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - balancerDecision_ = new java.util.ArrayList(balancerDecision_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder> balancerDecisionBuilder_; - - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public java.util.List getBalancerDecisionList() { - if (balancerDecisionBuilder_ == null) { - return java.util.Collections.unmodifiableList(balancerDecision_); - } else { - return balancerDecisionBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public int getBalancerDecisionCount() { - if (balancerDecisionBuilder_ == null) { - return balancerDecision_.size(); - } else { - return balancerDecisionBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision getBalancerDecision(int index) { - if (balancerDecisionBuilder_ == null) { - return balancerDecision_.get(index); - } else { - return balancerDecisionBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public Builder setBalancerDecision( - int index, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision value) { - if (balancerDecisionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - 
ensureBalancerDecisionIsMutable(); - balancerDecision_.set(index, value); - onChanged(); - } else { - balancerDecisionBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public Builder setBalancerDecision( - int index, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder builderForValue) { - if (balancerDecisionBuilder_ == null) { - ensureBalancerDecisionIsMutable(); - balancerDecision_.set(index, builderForValue.build()); - onChanged(); - } else { - balancerDecisionBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public Builder addBalancerDecision(org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision value) { - if (balancerDecisionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBalancerDecisionIsMutable(); - balancerDecision_.add(value); - onChanged(); - } else { - balancerDecisionBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public Builder addBalancerDecision( - int index, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision value) { - if (balancerDecisionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBalancerDecisionIsMutable(); - balancerDecision_.add(index, value); - onChanged(); - } else { - balancerDecisionBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public Builder addBalancerDecision( - org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder builderForValue) { - if (balancerDecisionBuilder_ == null) { - ensureBalancerDecisionIsMutable(); - balancerDecision_.add(builderForValue.build()); - onChanged(); - } else { - balancerDecisionBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public Builder addBalancerDecision( - int index, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder builderForValue) { - if (balancerDecisionBuilder_ == null) { - ensureBalancerDecisionIsMutable(); - balancerDecision_.add(index, builderForValue.build()); - onChanged(); - } else { - balancerDecisionBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public Builder addAllBalancerDecision( - java.lang.Iterable values) { - if (balancerDecisionBuilder_ == null) { - ensureBalancerDecisionIsMutable(); - super.addAll(values, balancerDecision_); - onChanged(); - } else { - balancerDecisionBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public Builder clearBalancerDecision() { - if (balancerDecisionBuilder_ == null) { - balancerDecision_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - balancerDecisionBuilder_.clear(); - } - return this; - } - /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; - */ - public Builder removeBalancerDecision(int index) { - if (balancerDecisionBuilder_ == null) { - ensureBalancerDecisionIsMutable(); - balancerDecision_.remove(index); - onChanged(); - } else { - 
balancerDecisionBuilder_.remove(index); - } - return this; - } + // optional string cluster_id = 1; + private java.lang.Object clusterId_ = ""; /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+       ** Not set if cluster ID could not be determined. 
+       * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder getBalancerDecisionBuilder( - int index) { - return getBalancerDecisionFieldBuilder().getBuilder(index); + public boolean hasClusterId() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+       ** Not set if cluster ID could not be determined. 
+       * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder getBalancerDecisionOrBuilder( - int index) { - if (balancerDecisionBuilder_ == null) { - return balancerDecision_.get(index); } else { - return balancerDecisionBuilder_.getMessageOrBuilder(index); + public java.lang.String getClusterId() { + java.lang.Object ref = clusterId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + clusterId_ = s; + return s; + } else { + return (java.lang.String) ref; } } /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+       ** Not set if cluster ID could not be determined. 
+       * 
*/ - public java.util.List - getBalancerDecisionOrBuilderList() { - if (balancerDecisionBuilder_ != null) { - return balancerDecisionBuilder_.getMessageOrBuilderList(); + public com.google.protobuf.ByteString + getClusterIdBytes() { + java.lang.Object ref = clusterId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterId_ = b; + return b; } else { - return java.util.Collections.unmodifiableList(balancerDecision_); + return (com.google.protobuf.ByteString) ref; } } /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+       ** Not set if cluster ID could not be determined. 
+       * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder addBalancerDecisionBuilder() { - return getBalancerDecisionFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.getDefaultInstance()); + public Builder setClusterId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + clusterId_ = value; + onChanged(); + return this; } /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+       ** Not set if cluster ID could not be determined. 
+       * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder addBalancerDecisionBuilder( - int index) { - return getBalancerDecisionFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.getDefaultInstance()); + public Builder clearClusterId() { + bitField0_ = (bitField0_ & ~0x00000001); + clusterId_ = getDefaultInstance().getClusterId(); + onChanged(); + return this; } /** - * repeated .hbase.pb.BalancerDecision balancer_decision = 1; + * optional string cluster_id = 1; + * + *
+       ** Not set if cluster ID could not be determined. 
+       * 
*/ - public java.util.List - getBalancerDecisionBuilderList() { - return getBalancerDecisionFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder> - getBalancerDecisionFieldBuilder() { - if (balancerDecisionBuilder_ == null) { - balancerDecisionBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecision.Builder, org.apache.hadoop.hbase.protobuf.generated.RecentLogs.BalancerDecisionOrBuilder>( - balancerDecision_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - balancerDecision_ = null; - } - return balancerDecisionBuilder_; + public Builder setClusterIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + clusterId_ = value; + onChanged(); + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.BalancerDecisionsResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterIdResponse) } static { - defaultInstance = new BalancerDecisionsResponse(true); + defaultInstance = new GetClusterIdResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BalancerDecisionsResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterIdResponse) } - public interface GetClusterIdRequestOrBuilder + public interface GetMastersRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** - * Protobuf type {@code hbase.pb.GetClusterIdRequest} + * Protobuf type {@code hbase.pb.GetMastersRequest} * *
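For orientation, here is a minimal client-side sketch of the GetClusterIdResponse API restored by the hunk above. It is not part of the patch; it assumes only the generated methods visible in the diff (newBuilder, setClusterId, hasClusterId, getClusterId, parseFrom) plus the standard toByteArray() every generated message inherits, and the UUID literal is a hypothetical cluster id:

    // Sketch only: exercises the GetClusterIdResponse API restored above.
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse;

    public class GetClusterIdResponseSketch {
      public static void main(String[] args) throws Exception {
        // Build a response; cluster_id is optional, so an empty response is also legal.
        GetClusterIdResponse resp = GetClusterIdResponse.newBuilder()
            .setClusterId("3f3fe2a8-cb71-4f16-a1f1-c641e1a8d8a2")  // hypothetical id
            .build();

        // Round-trip through the wire format, as the RPC layer would.
        byte[] wire = resp.toByteArray();
        GetClusterIdResponse parsed = GetClusterIdResponse.parseFrom(wire);

        // Per the field comment above, cluster_id may be unset if the
        // master could not determine it, so callers check presence first.
        if (parsed.hasClusterId()) {
          System.out.println("cluster id: " + parsed.getClusterId());
        }
      }
    }
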
-   ** Request and response to get the clusterID for this cluster 
+   ** Request and response to get the current list of all registered master servers 
    * 
*/ - public static final class GetClusterIdRequest extends + public static final class GetMastersRequest extends com.google.protobuf.GeneratedMessage - implements GetClusterIdRequestOrBuilder { - // Use GetClusterIdRequest.newBuilder() to construct. - private GetClusterIdRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetMastersRequestOrBuilder { + // Use GetMastersRequest.newBuilder() to construct. + private GetMastersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetClusterIdRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetMastersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetClusterIdRequest defaultInstance; - public static GetClusterIdRequest getDefaultInstance() { + private static final GetMastersRequest defaultInstance; + public static GetMastersRequest getDefaultInstance() { return defaultInstance; } - public GetClusterIdRequest getDefaultInstanceForType() { + public GetMastersRequest getDefaultInstanceForType() { return defaultInstance; } @@ -67045,7 +66826,7 @@ public GetClusterIdRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetClusterIdRequest( + private GetMastersRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -67081,28 +66862,28 @@ private GetClusterIdRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetClusterIdRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetMastersRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetClusterIdRequest(input, extensionRegistry); + return new GetMastersRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @@ -67146,10 +66927,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return 
true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) obj; boolean result = true; result = result && @@ -67170,53 +66951,53 @@ public int hashCode() { return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -67225,7 +67006,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCluster public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -67237,28 +67018,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetClusterIdRequest} + * Protobuf type {@code hbase.pb.GetMastersRequest} * *
-     ** Request and response to get the clusterID for this cluster 
+     ** Request and response to get the current list of all registered master servers 
      * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -67287,38 +67068,38 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest(this); onBuilt(); return result; } public Builder 
mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -67331,11 +67112,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -67345,66 +67126,63 @@ public Builder mergeFrom( return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterIdRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersRequest) } static { - defaultInstance = new GetClusterIdRequest(true); + defaultInstance = new GetMastersRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterIdRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersRequest) } - public interface GetClusterIdResponseOrBuilder + public interface GetMastersResponseEntryOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional string cluster_id = 1; + // required .hbase.pb.ServerName server_name = 1; /** - * optional string cluster_id = 1; - * - *
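GetMastersRequest, completed above, carries no fields. A short sketch of what a caller would send, assuming only the generated classes in this hunk (the service stub that actually carries the request is outside this hunk and is omitted):

    // Sketch only: GetMastersRequest is an empty message, so the shared
    // default instance is all a client ever needs to send.
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest;

    public class GetMastersRequestSketch {
      public static void main(String[] args) {
        GetMastersRequest req = GetMastersRequest.getDefaultInstance();
        // Equivalent, but allocates a builder: GetMastersRequest.newBuilder().build()
        System.out.println(req.getSerializedSize());  // 0: nothing to encode
      }
    }
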
-     ** Not set if cluster ID could not be determined. 
-     * 
+ * required .hbase.pb.ServerName server_name = 1; */ - boolean hasClusterId(); + boolean hasServerName(); /** - * optional string cluster_id = 1; - * - *
-     ** Not set if cluster ID could not be determined. 
-     * 
+ * required .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); + /** + * required .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + + // required bool is_active = 2; + /** + * required bool is_active = 2; */ - java.lang.String getClusterId(); + boolean hasIsActive(); /** - * optional string cluster_id = 1; - * - *
-     ** Not set if cluster ID could not be determined. 
-     * 
+ * required bool is_active = 2; */ - com.google.protobuf.ByteString - getClusterIdBytes(); + boolean getIsActive(); } /** - * Protobuf type {@code hbase.pb.GetClusterIdResponse} + * Protobuf type {@code hbase.pb.GetMastersResponseEntry} */ - public static final class GetClusterIdResponse extends + public static final class GetMastersResponseEntry extends com.google.protobuf.GeneratedMessage - implements GetClusterIdResponseOrBuilder { - // Use GetClusterIdResponse.newBuilder() to construct. - private GetClusterIdResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetMastersResponseEntryOrBuilder { + // Use GetMastersResponseEntry.newBuilder() to construct. + private GetMastersResponseEntry(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetClusterIdResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetMastersResponseEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetClusterIdResponse defaultInstance; - public static GetClusterIdResponse getDefaultInstance() { + private static final GetMastersResponseEntry defaultInstance; + public static GetMastersResponseEntry getDefaultInstance() { return defaultInstance; } - public GetClusterIdResponse getDefaultInstanceForType() { + public GetMastersResponseEntry getDefaultInstanceForType() { return defaultInstance; } @@ -67414,7 +67192,7 @@ public GetClusterIdResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetClusterIdResponse( + private GetMastersResponseEntry( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -67438,8 +67216,21 @@ private GetClusterIdResponse( break; } case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = serverName_.toBuilder(); + } + serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(serverName_); + serverName_ = subBuilder.buildPartial(); + } bitField0_ |= 0x00000001; - clusterId_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + isActive_ = input.readBool(); break; } } @@ -67456,95 +67247,91 @@ private GetClusterIdResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.Builder.class); + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetClusterIdResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetMastersResponseEntry parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetClusterIdResponse(input, extensionRegistry); + return new GetMastersResponseEntry(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // optional string cluster_id = 1; - public static final int CLUSTER_ID_FIELD_NUMBER = 1; - private java.lang.Object clusterId_; + // required .hbase.pb.ServerName server_name = 1; + public static final int SERVER_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_; /** - * optional string cluster_id = 1; - * - *
-     ** Not set if cluster ID could not be determined. 
-     * 
+ * required .hbase.pb.ServerName server_name = 1; */ - public boolean hasClusterId() { + public boolean hasServerName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional string cluster_id = 1; - * - *
-     ** Not set if cluster ID could not be determined. 
-     * 
+ * required .hbase.pb.ServerName server_name = 1; */ - public java.lang.String getClusterId() { - java.lang.Object ref = clusterId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - clusterId_ = s; - } - return s; - } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + return serverName_; } /** - * optional string cluster_id = 1; - * - *
-     ** Not set if cluster ID could not be determined. 
-     * 
+ * required .hbase.pb.ServerName server_name = 1; */ - public com.google.protobuf.ByteString - getClusterIdBytes() { - java.lang.Object ref = clusterId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - clusterId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + return serverName_; + } + + // required bool is_active = 2; + public static final int IS_ACTIVE_FIELD_NUMBER = 2; + private boolean isActive_; + /** + * required bool is_active = 2; + */ + public boolean hasIsActive() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bool is_active = 2; + */ + public boolean getIsActive() { + return isActive_; } private void initFields() { - clusterId_ = ""; + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + isActive_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasServerName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasIsActive()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServerName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -67553,7 +67340,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getClusterIdBytes()); + output.writeMessage(1, serverName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, isActive_); } getUnknownFields().writeTo(output); } @@ -67566,7 +67356,11 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getClusterIdBytes()); + .computeMessageSize(1, serverName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, isActive_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -67585,16 +67379,21 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) obj; boolean result = true; - result = result && (hasClusterId() == other.hasClusterId()); - if (hasClusterId()) { - result = result && getClusterId() - .equals(other.getClusterId()); + result = result && (hasServerName() == other.hasServerName()); + if (hasServerName()) { + result = result && getServerName() + .equals(other.getServerName()); + } + result = result && (hasIsActive() == other.hasIsActive()); + if (hasIsActive()) { + result = result && (getIsActive() + == 
other.getIsActive()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -67609,62 +67408,66 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasClusterId()) { - hash = (37 * hash) + CLUSTER_ID_FIELD_NUMBER; - hash = (53 * hash) + getClusterId().hashCode(); + if (hasServerName()) { + hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getServerName().hashCode(); + } + if (hasIsActive()) { + hash = (37 * hash) + IS_ACTIVE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsActive()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -67673,7 +67476,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCluster public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -67685,24 +67488,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetClusterIdResponse} + * Protobuf type {@code hbase.pb.GetMastersResponseEntry} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -67714,6 +67517,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerNameFieldBuilder(); } } private static Builder create() { @@ -67722,8 +67526,14 @@ private static Builder create() { public Builder clear() { super.clear(); - clusterId_ = ""; 
+ if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); + isActive_ = false; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -67733,55 +67543,76 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.clusterId_ = clusterId_; + if (serverNameBuilder_ == null) { + result.serverName_ = serverName_; + } else { + result.serverName_ = serverNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.isActive_ = isActive_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance()) return this; - if (other.hasClusterId()) { - bitField0_ |= 0x00000001; - clusterId_ 
= other.clusterId_; - onChanged(); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance()) return this; + if (other.hasServerName()) { + mergeServerName(other.getServerName()); + } + if (other.hasIsActive()) { + setIsActive(other.getIsActive()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasServerName()) { + + return false; + } + if (!hasIsActive()) { + + return false; + } + if (!getServerName().isInitialized()) { + + return false; + } return true; } @@ -67789,11 +67620,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -67804,141 +67635,214 @@ public Builder mergeFrom( } private int bitField0_; - // optional string cluster_id = 1; - private java.lang.Object clusterId_ = ""; + // required .hbase.pb.ServerName server_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; /** - * optional string cluster_id = 1; - * - *
-       ** Not set if cluster ID could not be determined. 
-       * 
+ * required .hbase.pb.ServerName server_name = 1; */ - public boolean hasClusterId() { + public boolean hasServerName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional string cluster_id = 1; - * - *
-       ** Not set if cluster ID could not be determined. 
-       * 
+ * required .hbase.pb.ServerName server_name = 1; */ - public java.lang.String getClusterId() { - java.lang.Object ref = clusterId_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - clusterId_ = s; - return s; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + if (serverNameBuilder_ == null) { + return serverName_; } else { - return (java.lang.String) ref; + return serverNameBuilder_.getMessage(); } } /** - * optional string cluster_id = 1; - * - *
-       ** Not set if cluster ID could not be determined. 
-       * 
+ * required .hbase.pb.ServerName server_name = 1; */ - public com.google.protobuf.ByteString - getClusterIdBytes() { - java.lang.Object ref = clusterId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - clusterId_ = b; - return b; + public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + serverName_ = value; + onChanged(); } else { - return (com.google.protobuf.ByteString) ref; + serverNameBuilder_.setMessage(value); } + bitField0_ |= 0x00000001; + return this; } /** - * optional string cluster_id = 1; - * - *
-       ** Not set if cluster ID could not be determined. 
-       * 
+ * required .hbase.pb.ServerName server_name = 1; */ - public Builder setClusterId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - clusterId_ = value; - onChanged(); + public Builder setServerName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + serverName_ = builderForValue.build(); + onChanged(); + } else { + serverNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; return this; } /** - * optional string cluster_id = 1; - * - *
-       ** Not set if cluster ID could not be determined. 
-       * 
+ * required .hbase.pb.ServerName server_name = 1; */ - public Builder clearClusterId() { + public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + serverName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial(); + } else { + serverName_ = value; + } + onChanged(); + } else { + serverNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public Builder clearServerName() { + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + serverNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - clusterId_ = getDefaultInstance().getClusterId(); + return this; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getServerNameFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilder(); + } else { + return serverName_; + } + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, + getParentForChildren(), + isClean()); + serverName_ = null; + } + return serverNameBuilder_; + } + + // required bool is_active = 2; + private boolean isActive_ ; + /** + * required bool is_active = 2; + */ + public boolean hasIsActive() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bool is_active = 2; + */ + public boolean getIsActive() { + return isActive_; + } + /** + * required bool is_active = 2; + */ + public Builder setIsActive(boolean value) { + bitField0_ |= 0x00000002; + isActive_ = value; onChanged(); return this; } /** - * optional string cluster_id = 1; - * - *
-       ** Not set if cluster ID could not be determined. 
-       * 
+ * required bool is_active = 2; */ - public Builder setClusterIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - clusterId_ = value; + public Builder clearIsActive() { + bitField0_ = (bitField0_ & ~0x00000002); + isActive_ = false; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterIdResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersResponseEntry) } static { - defaultInstance = new GetClusterIdResponse(true); + defaultInstance = new GetMastersResponseEntry(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterIdResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersResponseEntry) } - public interface GetMastersRequestOrBuilder + public interface GetMastersResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + java.util.List + getMasterServersList(); + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index); + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + int getMasterServersCount(); + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + java.util.List + getMasterServersOrBuilderList(); + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder( + int index); } /** - * Protobuf type {@code hbase.pb.GetMastersRequest} - * - *
-   ** Request and response to get the current list of all registers master servers 
-   * 
+ * Protobuf type {@code hbase.pb.GetMastersResponse} */ - public static final class GetMastersRequest extends + public static final class GetMastersResponse extends com.google.protobuf.GeneratedMessage - implements GetMastersRequestOrBuilder { - // Use GetMastersRequest.newBuilder() to construct. - private GetMastersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetMastersResponseOrBuilder { + // Use GetMastersResponse.newBuilder() to construct. + private GetMastersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetMastersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetMastersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetMastersRequest defaultInstance; - public static GetMastersRequest getDefaultInstance() { + private static final GetMastersResponse defaultInstance; + public static GetMastersResponse getDefaultInstance() { return defaultInstance; } - public GetMastersRequest getDefaultInstanceForType() { + public GetMastersResponse getDefaultInstanceForType() { return defaultInstance; } @@ -67948,11 +67852,12 @@ public GetMastersRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetMastersRequest( + private GetMastersResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -67970,6 +67875,14 @@ private GetMastersRequest( } break; } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + masterServers_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + masterServers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.PARSER, extensionRegistry)); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -67978,44 +67891,90 @@ private GetMastersRequest( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + masterServers_ = java.util.Collections.unmodifiableList(masterServers_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.Builder.class); + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetMastersRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetMastersResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetMastersRequest(input, extensionRegistry); + return new GetMastersResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } + // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + public static final int MASTER_SERVERS_FIELD_NUMBER = 1; + private java.util.List masterServers_; + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public java.util.List getMasterServersList() { + return masterServers_; + } + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public java.util.List + getMasterServersOrBuilderList() { + return masterServers_; + } + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public int getMasterServersCount() { + return masterServers_.size(); + } + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index) { + return masterServers_.get(index); + } + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder( + int index) { + return masterServers_.get(index); + } + private void initFields() { + masterServers_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + for (int i = 0; i < getMasterServersCount(); i++) { + if (!getMasterServers(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -68023,6 +67982,9 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + for (int i = 0; i < masterServers_.size(); i++) { + output.writeMessage(1, masterServers_.get(i)); + } getUnknownFields().writeTo(output); } @@ -68032,6 +67994,10 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; + for (int i = 0; i < masterServers_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, masterServers_.get(i)); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -68049,12 +68015,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse)) { return super.equals(obj); } - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) obj; boolean result = true; + result = result && getMasterServersList() + .equals(other.getMasterServersList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -68068,58 +68036,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getMasterServersCount() > 0) { + hash = (37 * hash) + MASTER_SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getMasterServersList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseDelimitedFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -68128,183 +68100,449 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMasters public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.GetMastersRequest} - * - *
-     ** Request and response to get the current list of all registers master servers 
-     * </pre>
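
In the GetMastersResponse parser above, the repeated master_servers field is recognized by "case 10": protobuf encodes each field as a tag combining the field number and wire type, and an embedded message uses wire type 2 (length-delimited). The parser accumulates entries into a plain ArrayList guarded by mutable_bitField0_ and freezes the list with Collections.unmodifiableList in the finally block, so even a partially parsed message exposes an immutable list. A standalone illustration of the tag arithmetic (nothing here is HBase-specific):

    // Why the generated parser switches on "case 10" for field 1:
    // tag = (field_number << 3) | wire_type, and embedded messages use
    // wire type 2 (length-delimited).
    public class TagDemo {
      static final int WIRETYPE_LENGTH_DELIMITED = 2;

      public static void main(String[] args) {
        int fieldNumber = 1; // master_servers = 1 in GetMastersResponse
        int tag = (fieldNumber << 3) | WIRETYPE_LENGTH_DELIMITED;
        System.out.println(tag); // prints 10, matching the switch case above
      }
    }
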
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor; + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetMastersResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getMasterServersFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (masterServersBuilder_ == null) { + masterServers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + masterServersBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse(this); + int from_bitField0_ = bitField0_; + if (masterServersBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + masterServers_ = 
java.util.Collections.unmodifiableList(masterServers_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.masterServers_ = masterServers_; + } else { + result.masterServers_ = masterServersBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance()) return this; + if (masterServersBuilder_ == null) { + if (!other.masterServers_.isEmpty()) { + if (masterServers_.isEmpty()) { + masterServers_ = other.masterServers_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureMasterServersIsMutable(); + masterServers_.addAll(other.masterServers_); + } + onChanged(); + } + } else { + if (!other.masterServers_.isEmpty()) { + if (masterServersBuilder_.isEmpty()) { + masterServersBuilder_.dispose(); + masterServersBuilder_ = null; + masterServers_ = other.masterServers_; + bitField0_ = (bitField0_ & ~0x00000001); + masterServersBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getMasterServersFieldBuilder() : null; + } else { + masterServersBuilder_.addAllMessages(other.masterServers_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getMasterServersCount(); i++) { + if (!getMasterServers(i).isInitialized()) { + + return false; + } + } + return true; } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.Builder.class); + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; } + private int bitField0_; - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); + // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + private java.util.List masterServers_ = + java.util.Collections.emptyList(); + private void ensureMasterServersIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + masterServers_ = new java.util.ArrayList(masterServers_); + bitField0_ |= 0x00000001; + } } - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - 
super(parent); - maybeForceBuilderInitialization(); + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> masterServersBuilder_; + + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public java.util.List getMasterServersList() { + if (masterServersBuilder_ == null) { + return java.util.Collections.unmodifiableList(masterServers_); + } else { + return masterServersBuilder_.getMessageList(); + } } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public int getMasterServersCount() { + if (masterServersBuilder_ == null) { + return masterServers_.size(); + } else { + return masterServersBuilder_.getCount(); } } - private static Builder create() { - return new Builder(); + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index) { + if (masterServersBuilder_ == null) { + return masterServers_.get(index); + } else { + return masterServersBuilder_.getMessage(index); + } } - - public Builder clear() { - super.clear(); + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public Builder setMasterServers( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) { + if (masterServersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMasterServersIsMutable(); + masterServers_.set(index, value); + onChanged(); + } else { + masterServersBuilder_.setMessage(index, value); + } return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public Builder setMasterServers( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) { + if (masterServersBuilder_ == null) { + ensureMasterServersIsMutable(); + masterServers_.set(index, builderForValue.build()); + onChanged(); + } else { + masterServersBuilder_.setMessage(index, builderForValue.build()); + } + return this; } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor; + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public Builder addMasterServers(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) { + if (masterServersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMasterServersIsMutable(); + masterServers_.add(value); + onChanged(); + } else { + masterServersBuilder_.addMessage(value); + } + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance(); + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public Builder addMasterServers( + int index, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) { + if (masterServersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMasterServersIsMutable(); + masterServers_.add(index, value); + onChanged(); + } else { + masterServersBuilder_.addMessage(index, value); + } + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public Builder addMasterServers( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) { + if (masterServersBuilder_ == null) { + ensureMasterServersIsMutable(); + masterServers_.add(builderForValue.build()); + onChanged(); + } else { + masterServersBuilder_.addMessage(builderForValue.build()); } - return result; + return this; + } + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public Builder addMasterServers( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) { + if (masterServersBuilder_ == null) { + ensureMasterServersIsMutable(); + masterServers_.add(index, builderForValue.build()); + onChanged(); + } else { + masterServersBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public Builder addAllMasterServers( + java.lang.Iterable values) { + if (masterServersBuilder_ == null) { + ensureMasterServersIsMutable(); + super.addAll(values, masterServers_); + onChanged(); + } else { + masterServersBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public Builder clearMasterServers() { + if (masterServersBuilder_ == null) { + masterServers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + masterServersBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public Builder removeMasterServers(int index) { + if (masterServersBuilder_ == null) { + ensureMasterServersIsMutable(); + masterServers_.remove(index); + onChanged(); + } else { + masterServersBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder getMasterServersBuilder( + int index) { + return getMasterServersFieldBuilder().getBuilder(index); } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest(this); - onBuilt(); - return result; + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder( + int index) { + if (masterServersBuilder_ == null) { + return masterServers_.get(index); } else { + return masterServersBuilder_.getMessageOrBuilder(index); + } } - - public 
Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)other); + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public java.util.List + getMasterServersOrBuilderList() { + if (masterServersBuilder_ != null) { + return masterServersBuilder_.getMessageOrBuilderList(); } else { - super.mergeFrom(other); - return this; + return java.util.Collections.unmodifiableList(masterServers_); } } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder addMasterServersBuilder() { + return getMasterServersFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance()); } - - public final boolean isInitialized() { - return true; + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder addMasterServersBuilder( + int index) { + return getMasterServersFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance()); } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } + /** + * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + */ + public java.util.List + getMasterServersBuilderList() { + return getMasterServersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> + getMasterServersFieldBuilder() { + if (masterServersBuilder_ == null) { + masterServersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder>( + masterServers_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + masterServers_ = null; } - return this; + return masterServersBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersRequest) + // 
@@protoc_insertion_point(builder_scope:hbase.pb.GetMastersResponse) } static { - defaultInstance = new GetMastersRequest(true); + defaultInstance = new GetMastersResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersResponse) } - public interface GetMastersResponseEntryOrBuilder + public interface GetMetaRegionLocationsRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - - // required .hbase.pb.ServerName server_name = 1; - /** - * required .hbase.pb.ServerName server_name = 1; - */ - boolean hasServerName(); - /** - * required .hbase.pb.ServerName server_name = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); - /** - * required .hbase.pb.ServerName server_name = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); - - // required bool is_active = 2; - /** - * required bool is_active = 2; - */ - boolean hasIsActive(); - /** - * required bool is_active = 2; - */ - boolean getIsActive(); } /** - * Protobuf type {@code hbase.pb.GetMastersResponseEntry} + * Protobuf type {@code hbase.pb.GetMetaRegionLocationsRequest} + * + *
+   ** Request and response to get the current list of meta region locations 
+   * </pre>
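
The GetMastersResponse.Builder generated above keeps the repeated field in one of two states: a plain list plus a bitField0_ ownership bit while no nested builders exist, or a RepeatedFieldBuilder once getMasterServersFieldBuilder() has been forced (for example via alwaysUseFieldBuilders); every mutator branches on masterServersBuilder_ == null accordingly. A sketch of that builder API in use, assuming the generated classes in this file compile as shown; the host name, port, and start code are illustrative values only:

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry;

    public class BuildGetMastersResponseDemo {
      public static void main(String[] args) {
        // addMasterServers(Builder) and setServerName(Builder) are among
        // the generated mutators in the hunk above.
        GetMastersResponse resp = GetMastersResponse.newBuilder()
            .addMasterServers(GetMastersResponseEntry.newBuilder()
                .setServerName(ServerName.newBuilder()
                    .setHostName("master-1.example.com") // illustrative host
                    .setPort(16000)                      // illustrative port
                    .setStartCode(1L))                   // illustrative start code
                .setIsActive(true))
            .build();
        System.out.println(resp.getMasterServersCount()); // prints 1
      }
    }
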
*/ - public static final class GetMastersResponseEntry extends + public static final class GetMetaRegionLocationsRequest extends com.google.protobuf.GeneratedMessage - implements GetMastersResponseEntryOrBuilder { - // Use GetMastersResponseEntry.newBuilder() to construct. - private GetMastersResponseEntry(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetMetaRegionLocationsRequestOrBuilder { + // Use GetMetaRegionLocationsRequest.newBuilder() to construct. + private GetMetaRegionLocationsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetMastersResponseEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetMetaRegionLocationsRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetMastersResponseEntry defaultInstance; - public static GetMastersResponseEntry getDefaultInstance() { + private static final GetMetaRegionLocationsRequest defaultInstance; + public static GetMetaRegionLocationsRequest getDefaultInstance() { return defaultInstance; } - public GetMastersResponseEntry getDefaultInstanceForType() { + public GetMetaRegionLocationsRequest getDefaultInstanceForType() { return defaultInstance; } @@ -68314,12 +68552,11 @@ public GetMastersResponseEntry getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetMastersResponseEntry( + private GetMetaRegionLocationsRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -68337,24 +68574,6 @@ private GetMastersResponseEntry( } break; } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = serverName_.toBuilder(); - } - serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(serverName_); - serverName_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } - case 16: { - bitField0_ |= 0x00000002; - isActive_ = input.readBool(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -68369,91 +68588,38 @@ private GetMastersResponseEntry( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetMastersResponseEntry parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetMetaRegionLocationsRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetMastersResponseEntry(input, extensionRegistry); + return new GetMetaRegionLocationsRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // required .hbase.pb.ServerName server_name = 1; - public static final int SERVER_NAME_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_; - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public boolean hasServerName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { - return serverName_; - } - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { - return serverName_; - } - - // required bool is_active = 2; - public static final int IS_ACTIVE_FIELD_NUMBER = 2; - private boolean isActive_; - /** - * required bool is_active = 2; - */ - public boolean hasIsActive() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required bool is_active = 2; - */ - public boolean getIsActive() { - return isActive_; - } - private void initFields() { - serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - isActive_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasServerName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasIsActive()) { - memoizedIsInitialized = 0; - return false; - } - if (!getServerName().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -68461,12 +68627,6 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, serverName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, isActive_); - } getUnknownFields().writeTo(output); } @@ -68476,14 +68636,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, serverName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, isActive_); - } 
size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -68499,24 +68651,14 @@ protected java.lang.Object writeReplace() @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) obj; - - boolean result = true; - result = result && (hasServerName() == other.hasServerName()); - if (hasServerName()) { - result = result && getServerName() - .equals(other.getServerName()); + return true; } - result = result && (hasIsActive() == other.hasIsActive()); - if (hasIsActive()) { - result = result && (getIsActive() - == other.getIsActive()); + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)) { + return super.equals(obj); } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) obj; + + boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -68530,66 +68672,58 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasServerName()) { - hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; - hash = (53 * hash) + getServerName().hashCode(); - } - if (hasIsActive()) { - hash = (37 * hash) + IS_ACTIVE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getIsActive()); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(java.io.InputStream input) + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -68598,7 +68732,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMasters public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -68610,24 +68744,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetMastersResponseEntry} + * Protobuf type {@code hbase.pb.GetMetaRegionLocationsRequest} + * + *
+     ** Request and response to get the current list of meta region locations 
+     * </pre>
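
GetMetaRegionLocationsRequest, generated above, declares no fields, so its parser loop only routes unknown tags, writeTo emits nothing beyond unknown fields, and isInitialized is trivially true. Empty request messages like this keep the RPC signature extensible: parameters can be added to the proto later without changing the service method. A round-trip sketch, assuming the generated class; the demo class name is illustrative:

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;

    public class EmptyRequestDemo {
      public static void main(String[] args) throws Exception {
        GetMetaRegionLocationsRequest req =
            GetMetaRegionLocationsRequest.newBuilder().build();
        byte[] wire = req.toByteArray();            // zero-length payload
        System.out.println(wire.length);            // prints 0
        GetMetaRegionLocationsRequest parsed =
            GetMetaRegionLocationsRequest.parseFrom(wire);
        System.out.println(parsed.isInitialized()); // prints true
      }
    }
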
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -68635,336 +68773,163 @@ private Builder() { private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getServerNameFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (serverNameBuilder_ == null) { - serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - } else { - serverNameBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - isActive_ = false; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry result = new 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (serverNameBuilder_ == null) { - result.serverName_ = serverName_; - } else { - result.serverName_ = serverNameBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.isActive_ = isActive_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance()) return this; - if (other.hasServerName()) { - mergeServerName(other.getServerName()); - } - if (other.hasIsActive()) { - setIsActive(other.getIsActive()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasServerName()) { - - return false; - } - if (!hasIsActive()) { - - return false; - } - if (!getServerName().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required .hbase.pb.ServerName server_name = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public boolean hasServerName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { - if (serverNameBuilder_ == null) { - return serverName_; - } else { - return serverNameBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (serverNameBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - serverName_ = value; - onChanged(); - } else { - 
serverNameBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public Builder setServerName( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (serverNameBuilder_ == null) { - serverName_ = builderForValue.build(); - onChanged(); - } else { - serverNameBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (serverNameBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { - serverName_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial(); - } else { - serverName_ = value; - } - onChanged(); - } else { - serverNameBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public Builder clearServerName() { - if (serverNameBuilder_ == null) { - serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - onChanged(); - } else { - serverNameBuilder_.clear(); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } - bitField0_ = (bitField0_ & ~0x00000001); + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); return this; } - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getServerNameFieldBuilder().getBuilder(); + + public Builder clone() { + return create().mergeFrom(buildPartial()); } - /** - * required .hbase.pb.ServerName server_name = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { - if (serverNameBuilder_ != null) { - return serverNameBuilder_.getMessageOrBuilder(); - } else { - return serverName_; - } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor; } - /** - * required .hbase.pb.ServerName server_name = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> - getServerNameFieldBuilder() { - if (serverNameBuilder_ == null) { - serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( - serverName_, - getParentForChildren(), - isClean()); - serverName_ = null; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest getDefaultInstanceForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); } - return serverNameBuilder_; + return result; } - // required bool is_active = 2; - private boolean isActive_ ; - /** - * required bool is_active = 2; - */ - public boolean hasIsActive() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest(this); + onBuilt(); + return result; } - /** - * required bool is_active = 2; - */ - public boolean getIsActive() { - return isActive_; + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)other); + } else { + super.mergeFrom(other); + return this; + } } - /** - * required bool is_active = 2; - */ - public Builder setIsActive(boolean value) { - bitField0_ |= 0x00000002; - isActive_ = value; - onChanged(); + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); return this; } - /** - * required bool is_active = 2; - */ - public Builder clearIsActive() { - bitField0_ = (bitField0_ & ~0x00000002); - isActive_ = false; - onChanged(); + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersResponseEntry) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetMetaRegionLocationsRequest) } static { - defaultInstance = new GetMastersResponseEntry(true); + defaultInstance = new GetMetaRegionLocationsRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersResponseEntry) + // @@protoc_insertion_point(class_scope:hbase.pb.GetMetaRegionLocationsRequest) } - public interface GetMastersResponseOrBuilder + public interface GetMetaRegionLocationsResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + // repeated .hbase.pb.RegionLocation 
meta_locations = 1; /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - java.util.List - getMasterServersList(); + java.util.List + getMetaLocationsList(); /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index); /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - int getMasterServersCount(); + int getMetaLocationsCount(); /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - java.util.List - getMasterServersOrBuilderList(); + java.util.List + getMetaLocationsOrBuilderList(); /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder( int index); } /** - * Protobuf type {@code hbase.pb.GetMastersResponse} + * Protobuf type {@code hbase.pb.GetMetaRegionLocationsResponse} */ - public static final class GetMastersResponse extends + public static final class GetMetaRegionLocationsResponse extends com.google.protobuf.GeneratedMessage - implements GetMastersResponseOrBuilder { - // Use GetMastersResponse.newBuilder() to construct. - private GetMastersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetMetaRegionLocationsResponseOrBuilder { + // Use GetMetaRegionLocationsResponse.newBuilder() to construct. + private GetMetaRegionLocationsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetMastersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetMetaRegionLocationsResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetMastersResponse defaultInstance; - public static GetMastersResponse getDefaultInstance() { + private static final GetMetaRegionLocationsResponse defaultInstance; + public static GetMetaRegionLocationsResponse getDefaultInstance() { return defaultInstance; } - public GetMastersResponse getDefaultInstanceForType() { + public GetMetaRegionLocationsResponse getDefaultInstanceForType() { return defaultInstance; } @@ -68974,7 +68939,7 @@ public GetMastersResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetMastersResponse( + private GetMetaRegionLocationsResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -68999,10 +68964,10 @@ private GetMastersResponse( } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - masterServers_ = new java.util.ArrayList(); + metaLocations_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } - masterServers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.PARSER, extensionRegistry)); + metaLocations_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.PARSER, extensionRegistry)); break; } } @@ -69014,7 +68979,7 @@ private GetMastersResponse( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - masterServers_ = java.util.Collections.unmodifiableList(masterServers_); + metaLocations_ = java.util.Collections.unmodifiableList(metaLocations_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -69022,77 +68987,97 @@ private GetMastersResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetMastersResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetMetaRegionLocationsResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetMastersResponse(input, extensionRegistry); + return new GetMetaRegionLocationsResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; - public static final int MASTER_SERVERS_FIELD_NUMBER = 1; - private java.util.List masterServers_; + // repeated .hbase.pb.RegionLocation meta_locations = 1; + public static final int META_LOCATIONS_FIELD_NUMBER = 1; + private java.util.List metaLocations_; /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - public java.util.List getMasterServersList() { - return masterServers_; + public java.util.List getMetaLocationsList() { + return metaLocations_; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - public java.util.List - getMasterServersOrBuilderList() { - return masterServers_; + public java.util.List + getMetaLocationsOrBuilderList() { + return metaLocations_; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - public int getMasterServersCount() { - return masterServers_.size(); + public int getMetaLocationsCount() { + return metaLocations_.size(); } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index) { - return masterServers_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index) { + return metaLocations_.get(index); } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder( int index) { - return masterServers_.get(index); + return metaLocations_.get(index); } private void initFields() { - masterServers_ = java.util.Collections.emptyList(); + metaLocations_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getMasterServersCount(); i++) { - if (!getMasterServers(i).isInitialized()) { + for (int i = 0; i < getMetaLocationsCount(); i++) { + if (!getMetaLocations(i).isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -69104,8 +69089,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < masterServers_.size(); i++) { - output.writeMessage(1, masterServers_.get(i)); + for (int i = 0; i < metaLocations_.size(); i++) { + output.writeMessage(1, metaLocations_.get(i)); } getUnknownFields().writeTo(output); } @@ -69116,9 +69101,9 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - for (int i = 0; i < masterServers_.size(); i++) { + for (int i = 0; i < metaLocations_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, masterServers_.get(i)); + .computeMessageSize(1, metaLocations_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -69137,14 +69122,14 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) obj; boolean result = true; - result = result && getMasterServersList() - .equals(other.getMasterServersList()); + result = result && getMetaLocationsList() + .equals(other.getMetaLocationsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -69158,62 +69143,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getMasterServersCount() > 0) { - hash = (37 * hash) + MASTER_SERVERS_FIELD_NUMBER; - hash = (53 * hash) + getMasterServersList().hashCode(); + if (getMetaLocationsCount() > 0) { + hash = (37 * hash) + META_LOCATIONS_FIELD_NUMBER; + hash = (53 * hash) + getMetaLocationsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { 
return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -69222,7 +69207,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMasters public static Builder newBuilder() { return Builder.create(); } public Builder 
newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -69234,24 +69219,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetMastersResponse} + * Protobuf type {@code hbase.pb.GetMetaRegionLocationsResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -69263,7 +69248,7 @@ private Builder( } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getMasterServersFieldBuilder(); + getMetaLocationsFieldBuilder(); } } private static Builder create() { @@ -69272,11 +69257,11 @@ private static Builder create() { public Builder clear() { super.clear(); - if (masterServersBuilder_ == null) { - masterServers_ = java.util.Collections.emptyList(); + if (metaLocationsBuilder_ == null) { + metaLocations_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { - masterServersBuilder_.clear(); + metaLocationsBuilder_.clear(); } return this; } @@ -69287,71 +69272,71 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse getDefaultInstanceForType() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse(this); int from_bitField0_ = bitField0_; - if (masterServersBuilder_ == null) { + if (metaLocationsBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { - masterServers_ = java.util.Collections.unmodifiableList(masterServers_); + metaLocations_ = java.util.Collections.unmodifiableList(metaLocations_); bitField0_ = (bitField0_ & ~0x00000001); } - result.masterServers_ = masterServers_; + result.metaLocations_ = metaLocations_; } else { - result.masterServers_ = masterServersBuilder_.build(); + result.metaLocations_ = metaLocationsBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance()) return this; - if (masterServersBuilder_ == null) { - if (!other.masterServers_.isEmpty()) { - if (masterServers_.isEmpty()) { - masterServers_ = other.masterServers_; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance()) return this; + if (metaLocationsBuilder_ == null) { + if (!other.metaLocations_.isEmpty()) { + if (metaLocations_.isEmpty()) { + metaLocations_ = other.metaLocations_; bitField0_ = (bitField0_ & ~0x00000001); } else { - ensureMasterServersIsMutable(); - masterServers_.addAll(other.masterServers_); + ensureMetaLocationsIsMutable(); + 
metaLocations_.addAll(other.metaLocations_); } onChanged(); } } else { - if (!other.masterServers_.isEmpty()) { - if (masterServersBuilder_.isEmpty()) { - masterServersBuilder_.dispose(); - masterServersBuilder_ = null; - masterServers_ = other.masterServers_; + if (!other.metaLocations_.isEmpty()) { + if (metaLocationsBuilder_.isEmpty()) { + metaLocationsBuilder_.dispose(); + metaLocationsBuilder_ = null; + metaLocations_ = other.metaLocations_; bitField0_ = (bitField0_ & ~0x00000001); - masterServersBuilder_ = + metaLocationsBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getMasterServersFieldBuilder() : null; + getMetaLocationsFieldBuilder() : null; } else { - masterServersBuilder_.addAllMessages(other.masterServers_); + metaLocationsBuilder_.addAllMessages(other.metaLocations_); } } } @@ -69360,8 +69345,8 @@ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos } public final boolean isInitialized() { - for (int i = 0; i < getMasterServersCount(); i++) { - if (!getMasterServers(i).isInitialized()) { + for (int i = 0; i < getMetaLocationsCount(); i++) { + if (!getMetaLocations(i).isInitialized()) { return false; } @@ -69373,11 +69358,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -69388,283 +69373,355 @@ public Builder mergeFrom( } private int bitField0_; - // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; - private java.util.List masterServers_ = + // repeated .hbase.pb.RegionLocation meta_locations = 1; + private java.util.List metaLocations_ = java.util.Collections.emptyList(); - private void ensureMasterServersIsMutable() { + private void ensureMetaLocationsIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { - masterServers_ = new java.util.ArrayList(masterServers_); + metaLocations_ = new java.util.ArrayList(metaLocations_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> masterServersBuilder_; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> metaLocationsBuilder_; /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public java.util.List getMasterServersList() { - if (masterServersBuilder_ == null) { - return java.util.Collections.unmodifiableList(masterServers_); + public java.util.List getMetaLocationsList() { + if (metaLocationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(metaLocations_); } else { - return masterServersBuilder_.getMessageList(); + return metaLocationsBuilder_.getMessageList(); } } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public int getMasterServersCount() { - if (masterServersBuilder_ == null) { - return masterServers_.size(); + public int getMetaLocationsCount() { + if (metaLocationsBuilder_ == null) { + return metaLocations_.size(); } else { - return masterServersBuilder_.getCount(); + return metaLocationsBuilder_.getCount(); } } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index) { - if (masterServersBuilder_ == null) { - return masterServers_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index) { + if (metaLocationsBuilder_ == null) { + return metaLocations_.get(index); } else { - return masterServersBuilder_.getMessage(index); + return metaLocationsBuilder_.getMessage(index); } } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder setMasterServers( - int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) { - if (masterServersBuilder_ == null) { + public Builder setMetaLocations( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) { + if (metaLocationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureMasterServersIsMutable(); - masterServers_.set(index, value); + ensureMetaLocationsIsMutable(); + metaLocations_.set(index, value); onChanged(); } else { - masterServersBuilder_.setMessage(index, value); + metaLocationsBuilder_.setMessage(index, value); } return this; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder setMasterServers( - int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) { - if (masterServersBuilder_ == null) { - ensureMasterServersIsMutable(); - masterServers_.set(index, builderForValue.build()); + public Builder setMetaLocations( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) { + if (metaLocationsBuilder_ == null) { + ensureMetaLocationsIsMutable(); + metaLocations_.set(index, builderForValue.build()); onChanged(); } else { - masterServersBuilder_.setMessage(index, builderForValue.build()); + metaLocationsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder addMasterServers(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) { - if (masterServersBuilder_ == null) { + public Builder addMetaLocations(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) { + if (metaLocationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); - } - ensureMasterServersIsMutable(); - masterServers_.add(value); + } + ensureMetaLocationsIsMutable(); + metaLocations_.add(value); onChanged(); } else { - masterServersBuilder_.addMessage(value); + metaLocationsBuilder_.addMessage(value); } return this; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder addMasterServers( - int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) { - if (masterServersBuilder_ == null) { + public Builder addMetaLocations( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) { + if (metaLocationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureMasterServersIsMutable(); - masterServers_.add(index, value); + ensureMetaLocationsIsMutable(); + metaLocations_.add(index, value); onChanged(); } else { - masterServersBuilder_.addMessage(index, value); + metaLocationsBuilder_.addMessage(index, value); } return this; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder addMasterServers( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) { - if (masterServersBuilder_ == null) { - ensureMasterServersIsMutable(); - masterServers_.add(builderForValue.build()); + public Builder addMetaLocations( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) { + if (metaLocationsBuilder_ == null) { + ensureMetaLocationsIsMutable(); + metaLocations_.add(builderForValue.build()); onChanged(); } else { - masterServersBuilder_.addMessage(builderForValue.build()); + metaLocationsBuilder_.addMessage(builderForValue.build()); } return this; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder addMasterServers( - int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) { - if (masterServersBuilder_ == null) { - ensureMasterServersIsMutable(); - masterServers_.add(index, builderForValue.build()); + public Builder addMetaLocations( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) { + if (metaLocationsBuilder_ == null) { + ensureMetaLocationsIsMutable(); + metaLocations_.add(index, builderForValue.build()); onChanged(); } else { - masterServersBuilder_.addMessage(index, builderForValue.build()); + metaLocationsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder addAllMasterServers( - java.lang.Iterable values) { - if (masterServersBuilder_ == null) { - ensureMasterServersIsMutable(); - super.addAll(values, masterServers_); + public Builder addAllMetaLocations( + java.lang.Iterable values) { + if (metaLocationsBuilder_ == null) { + ensureMetaLocationsIsMutable(); + super.addAll(values, metaLocations_); onChanged(); } else { - masterServersBuilder_.addAllMessages(values); + metaLocationsBuilder_.addAllMessages(values); } return this; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder clearMasterServers() { - if (masterServersBuilder_ == null) { - masterServers_ = java.util.Collections.emptyList(); + public Builder clearMetaLocations() { + if (metaLocationsBuilder_ == null) { + metaLocations_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { - masterServersBuilder_.clear(); + metaLocationsBuilder_.clear(); } return this; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public Builder removeMasterServers(int index) { - if (masterServersBuilder_ == null) { - ensureMasterServersIsMutable(); - masterServers_.remove(index); + public Builder removeMetaLocations(int index) { + if (metaLocationsBuilder_ == null) { + ensureMetaLocationsIsMutable(); + metaLocations_.remove(index); onChanged(); } else { - masterServersBuilder_.remove(index); + metaLocationsBuilder_.remove(index); } return this; } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder getMasterServersBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder getMetaLocationsBuilder( int index) { - return getMasterServersFieldBuilder().getBuilder(index); + return getMetaLocationsFieldBuilder().getBuilder(index); } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder( int index) { - if (masterServersBuilder_ == null) { - return masterServers_.get(index); } else { - return masterServersBuilder_.getMessageOrBuilder(index); + if (metaLocationsBuilder_ == null) { + return metaLocations_.get(index); } else { + return metaLocationsBuilder_.getMessageOrBuilder(index); } } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public java.util.List - getMasterServersOrBuilderList() { - if (masterServersBuilder_ != null) { - return masterServersBuilder_.getMessageOrBuilderList(); + public java.util.List + getMetaLocationsOrBuilderList() { + if (metaLocationsBuilder_ != null) { + return metaLocationsBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(masterServers_); + return java.util.Collections.unmodifiableList(metaLocations_); } } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder addMasterServersBuilder() { - return getMasterServersFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance()); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder addMetaLocationsBuilder() { + return getMetaLocationsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance()); } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder addMasterServersBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder addMetaLocationsBuilder( int index) { - return getMasterServersFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance()); + return getMetaLocationsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance()); } /** - * repeated .hbase.pb.GetMastersResponseEntry master_servers = 1; + * repeated .hbase.pb.RegionLocation meta_locations = 1; + * + *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
*/ - public java.util.List - getMasterServersBuilderList() { - return getMasterServersFieldBuilder().getBuilderList(); + public java.util.List + getMetaLocationsBuilderList() { + return getMetaLocationsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> - getMasterServersFieldBuilder() { - if (masterServersBuilder_ == null) { - masterServersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder>( - masterServers_, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> + getMetaLocationsFieldBuilder() { + if (metaLocationsBuilder_ == null) { + metaLocationsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder>( + metaLocations_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); - masterServers_ = null; + metaLocations_ = null; } - return masterServersBuilder_; + return metaLocationsBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetMetaRegionLocationsResponse) } static { - defaultInstance = new GetMastersResponse(true); + defaultInstance = new GetMetaRegionLocationsResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetMetaRegionLocationsResponse) } - public interface GetMetaRegionLocationsRequestOrBuilder + public interface GetNumLiveRSRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** - * Protobuf type {@code hbase.pb.GetMetaRegionLocationsRequest} + * Protobuf type {@code hbase.pb.GetNumLiveRSRequest} * *
    * <pre>
-   ** Request and response to get the current list of meta region locations 
+   ** Request and response to get the number of live region servers 
    * </pre>
*/ - public static final class GetMetaRegionLocationsRequest extends + public static final class GetNumLiveRSRequest extends com.google.protobuf.GeneratedMessage - implements GetMetaRegionLocationsRequestOrBuilder { - // Use GetMetaRegionLocationsRequest.newBuilder() to construct. - private GetMetaRegionLocationsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetNumLiveRSRequestOrBuilder { + // Use GetNumLiveRSRequest.newBuilder() to construct. + private GetNumLiveRSRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetMetaRegionLocationsRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetNumLiveRSRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetMetaRegionLocationsRequest defaultInstance; - public static GetMetaRegionLocationsRequest getDefaultInstance() { + private static final GetNumLiveRSRequest defaultInstance; + public static GetNumLiveRSRequest getDefaultInstance() { return defaultInstance; } - public GetMetaRegionLocationsRequest getDefaultInstanceForType() { + public GetNumLiveRSRequest getDefaultInstanceForType() { return defaultInstance; } @@ -69674,7 +69731,7 @@ public GetMetaRegionLocationsRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetMetaRegionLocationsRequest( + private GetNumLiveRSRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -69710,28 +69767,28 @@ private GetMetaRegionLocationsRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetMetaRegionLocationsRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetNumLiveRSRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetMetaRegionLocationsRequest(input, extensionRegistry); + return new GetNumLiveRSRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser 
getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @@ -69775,10 +69832,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) obj; boolean result = true; result = result && @@ -69799,53 +69856,53 @@ public int hashCode() { return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -69854,7 +69911,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaReg public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -69866,28 +69923,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetMetaRegionLocationsRequest} + * Protobuf type {@code hbase.pb.GetNumLiveRSRequest} * *
      * <pre>
-     ** Request and response to get the current list of meta region locations 
+     ** Request and response to get the number of live region servers 
      * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -69916,38 +69973,38 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest 
result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -69960,11 +70017,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -69974,84 +70031,49 @@ public Builder mergeFrom( return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetMetaRegionLocationsRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetNumLiveRSRequest) } static { - defaultInstance = new GetMetaRegionLocationsRequest(true); + defaultInstance = new GetNumLiveRSRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetMetaRegionLocationsRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.GetNumLiveRSRequest) } - public interface GetMetaRegionLocationsResponseOrBuilder + public interface GetNumLiveRSResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .hbase.pb.RegionLocation meta_locations = 1; - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-     * <pre>
-     ** Not set if meta region locations could not be determined. 
-     * </pre>
- */ - java.util.List - getMetaLocationsList(); - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-     * <pre>
-     ** Not set if meta region locations could not be determined. 
-     * </pre>
- */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index); - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-     * <pre>
-     ** Not set if meta region locations could not be determined. 
-     * </pre>
- */ - int getMetaLocationsCount(); + // required int32 num_region_servers = 1; /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-     * <pre>
-     ** Not set if meta region locations could not be determined. 
-     * </pre>
+ * required int32 num_region_servers = 1; */ - java.util.List - getMetaLocationsOrBuilderList(); + boolean hasNumRegionServers(); /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-     ** Not set if meta region locations could not be determined. 
-     * 
+ * required int32 num_region_servers = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder( - int index); + int getNumRegionServers(); } /** - * Protobuf type {@code hbase.pb.GetMetaRegionLocationsResponse} + * Protobuf type {@code hbase.pb.GetNumLiveRSResponse} */ - public static final class GetMetaRegionLocationsResponse extends + public static final class GetNumLiveRSResponse extends com.google.protobuf.GeneratedMessage - implements GetMetaRegionLocationsResponseOrBuilder { - // Use GetMetaRegionLocationsResponse.newBuilder() to construct. - private GetMetaRegionLocationsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetNumLiveRSResponseOrBuilder { + // Use GetNumLiveRSResponse.newBuilder() to construct. + private GetNumLiveRSResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetMetaRegionLocationsResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetNumLiveRSResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetMetaRegionLocationsResponse defaultInstance; - public static GetMetaRegionLocationsResponse getDefaultInstance() { + private static final GetNumLiveRSResponse defaultInstance; + public static GetNumLiveRSResponse getDefaultInstance() { return defaultInstance; } - public GetMetaRegionLocationsResponse getDefaultInstanceForType() { + public GetNumLiveRSResponse getDefaultInstanceForType() { return defaultInstance; } @@ -70061,7 +70083,7 @@ public GetMetaRegionLocationsResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetMetaRegionLocationsResponse( + private GetNumLiveRSResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -70084,12 +70106,9 @@ private GetMetaRegionLocationsResponse( } break; } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - metaLocations_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - metaLocations_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.PARSER, extensionRegistry)); + case 8: { + bitField0_ |= 0x00000001; + numRegionServers_ = input.readInt32(); break; } } @@ -70100,109 +70119,65 @@ private GetMetaRegionLocationsResponse( throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - metaLocations_ = java.util.Collections.unmodifiableList(metaLocations_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.Builder.class); } - public static com.google.protobuf.Parser<GetMetaRegionLocationsResponse> PARSER = - new com.google.protobuf.AbstractParser<GetMetaRegionLocationsResponse>() { - public GetMetaRegionLocationsResponse parsePartialFrom( + public static com.google.protobuf.Parser<GetNumLiveRSResponse> PARSER = + new com.google.protobuf.AbstractParser<GetNumLiveRSResponse>() { + public GetNumLiveRSResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetMetaRegionLocationsResponse(input, extensionRegistry); + return new GetNumLiveRSResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser<GetMetaRegionLocationsResponse> getParserForType() { + public com.google.protobuf.Parser<GetNumLiveRSResponse> getParserForType() { return PARSER; } - // repeated .hbase.pb.RegionLocation meta_locations = 1; - public static final int META_LOCATIONS_FIELD_NUMBER = 1; - private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation> metaLocations_; - /** - * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code> - * - * <pre>
-     ** Not set if meta region locations could not be determined. 
-     * </pre>
- */ - public java.util.List getMetaLocationsList() { - return metaLocations_; - } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-     ** Not set if meta region locations could not be determined. 
-     * 
- */ - public java.util.List - getMetaLocationsOrBuilderList() { - return metaLocations_; - } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-     ** Not set if meta region locations could not be determined. 
-     * 
- */ - public int getMetaLocationsCount() { - return metaLocations_.size(); - } + private int bitField0_; + // required int32 num_region_servers = 1; + public static final int NUM_REGION_SERVERS_FIELD_NUMBER = 1; + private int numRegionServers_; /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-     ** Not set if meta region locations could not be determined. 
-     * 
+ * required int32 num_region_servers = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index) { - return metaLocations_.get(index); + public boolean hasNumRegionServers() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-     ** Not set if meta region locations could not be determined. 
-     * 
+ * required int32 num_region_servers = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder( - int index) { - return metaLocations_.get(index); + public int getNumRegionServers() { + return numRegionServers_; } private void initFields() { - metaLocations_ = java.util.Collections.emptyList(); + numRegionServers_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getMetaLocationsCount(); i++) { - if (!getMetaLocations(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasNumRegionServers()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -70211,8 +70186,8 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < metaLocations_.size(); i++) { - output.writeMessage(1, metaLocations_.get(i)); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(1, numRegionServers_); } getUnknownFields().writeTo(output); } @@ -70223,9 +70198,9 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - for (int i = 0; i < metaLocations_.size(); i++) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, metaLocations_.get(i)); + .computeInt32Size(1, numRegionServers_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -70244,14 +70219,17 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) obj; boolean result = true; - result = result && getMetaLocationsList() - .equals(other.getMetaLocationsList()); + result = result && (hasNumRegionServers() == other.hasNumRegionServers()); + if (hasNumRegionServers()) { + result = result && (getNumRegionServers() + == other.getNumRegionServers()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -70265,62 +70243,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getMetaLocationsCount() > 0) { - hash = (37 * hash) + META_LOCATIONS_FIELD_NUMBER; - hash = (53 * hash) + getMetaLocationsList().hashCode(); + if (hasNumRegionServers()) { + hash = (37 * hash) + NUM_REGION_SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getNumRegionServers(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( com.google.protobuf.ByteString data) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -70329,7 +70307,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaReg public static Builder 
newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -70341,509 +70319,222 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetMetaRegionLocationsResponse} + * Protobuf type {@code hbase.pb.GetNumLiveRSResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.newBuilder() private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getMetaLocationsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (metaLocationsBuilder_ == null) { - metaLocations_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - metaLocationsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance(); - } - 
- public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse(this); - int from_bitField0_ = bitField0_; - if (metaLocationsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - metaLocations_ = java.util.Collections.unmodifiableList(metaLocations_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.metaLocations_ = metaLocations_; - } else { - result.metaLocations_ = metaLocationsBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance()) return this; - if (metaLocationsBuilder_ == null) { - if (!other.metaLocations_.isEmpty()) { - if (metaLocations_.isEmpty()) { - metaLocations_ = other.metaLocations_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureMetaLocationsIsMutable(); - metaLocations_.addAll(other.metaLocations_); - } - onChanged(); - } - } else { - if (!other.metaLocations_.isEmpty()) { - if (metaLocationsBuilder_.isEmpty()) { - metaLocationsBuilder_.dispose(); - metaLocationsBuilder_ = null; - metaLocations_ = other.metaLocations_; - bitField0_ = (bitField0_ & ~0x00000001); - metaLocationsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getMetaLocationsFieldBuilder() : null; - } else { - metaLocationsBuilder_.addAllMessages(other.metaLocations_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getMetaLocationsCount(); i++) { - if (!getMetaLocations(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .hbase.pb.RegionLocation meta_locations = 1; - private java.util.List metaLocations_ = - java.util.Collections.emptyList(); - private void ensureMetaLocationsIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - metaLocations_ = new java.util.ArrayList(metaLocations_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> metaLocationsBuilder_; - - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public java.util.List getMetaLocationsList() { - if (metaLocationsBuilder_ == null) { - return java.util.Collections.unmodifiableList(metaLocations_); - } else { - return metaLocationsBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public int getMetaLocationsCount() { - if (metaLocationsBuilder_ == null) { - return metaLocations_.size(); - } else { - return metaLocationsBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index) { - if (metaLocationsBuilder_ == null) { - return metaLocations_.get(index); - } else { - return metaLocationsBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public Builder setMetaLocations( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) { - if (metaLocationsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureMetaLocationsIsMutable(); - metaLocations_.set(index, value); - onChanged(); - } else { - metaLocationsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public Builder setMetaLocations( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) { - if (metaLocationsBuilder_ == null) { - ensureMetaLocationsIsMutable(); - metaLocations_.set(index, builderForValue.build()); - onChanged(); - } else { - metaLocationsBuilder_.setMessage(index, builderForValue.build()); - } - return this; + maybeForceBuilderInitialization(); } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public Builder addMetaLocations(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) { - if (metaLocationsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureMetaLocationsIsMutable(); - metaLocations_.add(value); - onChanged(); - } else { - metaLocationsBuilder_.addMessage(value); - } - return this; + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public Builder addMetaLocations( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) { - if (metaLocationsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureMetaLocationsIsMutable(); - metaLocations_.add(index, value); - onChanged(); - } else { - metaLocationsBuilder_.addMessage(index, value); + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } - return this; } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public Builder addMetaLocations( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) { - if (metaLocationsBuilder_ == null) { - ensureMetaLocationsIsMutable(); - metaLocations_.add(builderForValue.build()); - onChanged(); - } else { - metaLocationsBuilder_.addMessage(builderForValue.build()); - } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + numRegionServers_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); return this; } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public Builder addMetaLocations( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) { - if (metaLocationsBuilder_ == null) { - ensureMetaLocationsIsMutable(); - metaLocations_.add(index, builderForValue.build()); - onChanged(); - } else { - metaLocationsBuilder_.addMessage(index, builderForValue.build()); + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); } - return this; + return result; } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public Builder addAllMetaLocations( - java.lang.Iterable values) { - if (metaLocationsBuilder_ == null) { - ensureMetaLocationsIsMutable(); - super.addAll(values, metaLocations_); - onChanged(); - } else { - metaLocationsBuilder_.addAllMessages(values); + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; } - return this; + result.numRegionServers_ = numRegionServers_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public Builder clearMetaLocations() { - if (metaLocationsBuilder_ == null) { - metaLocations_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse)other); } else { - metaLocationsBuilder_.clear(); + super.mergeFrom(other); + return this; } - return this; } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public Builder removeMetaLocations(int index) { - if (metaLocationsBuilder_ == null) { - ensureMetaLocationsIsMutable(); - metaLocations_.remove(index); - onChanged(); - } else { - metaLocationsBuilder_.remove(index); + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance()) return this; + if (other.hasNumRegionServers()) { + setNumRegionServers(other.getNumRegionServers()); } + this.mergeUnknownFields(other.getUnknownFields()); return this; } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder getMetaLocationsBuilder( - int index) { - return getMetaLocationsFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder( - int index) { - if (metaLocationsBuilder_ == null) { - return metaLocations_.get(index); } else { - return metaLocationsBuilder_.getMessageOrBuilder(index); + + public final boolean isInitialized() { + if (!hasNumRegionServers()) { + + return false; } + return true; } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public java.util.List - getMetaLocationsOrBuilderList() { - if (metaLocationsBuilder_ != null) { - return metaLocationsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(metaLocations_); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } } + return this; } + private int bitField0_; + + // required int32 num_region_servers = 1; + private int numRegionServers_ ; /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
+ * required int32 num_region_servers = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder addMetaLocationsBuilder() { - return getMetaLocationsFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance()); + public boolean hasNumRegionServers() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder addMetaLocationsBuilder( - int index) { - return getMetaLocationsFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance()); + /** + * required int32 num_region_servers = 1; + */ + public int getNumRegionServers() { + return numRegionServers_; } /** - * repeated .hbase.pb.RegionLocation meta_locations = 1; - * - *
-       ** Not set if meta region locations could not be determined. 
-       * 
+ * required int32 num_region_servers = 1; */ - public java.util.List - getMetaLocationsBuilderList() { - return getMetaLocationsFieldBuilder().getBuilderList(); + public Builder setNumRegionServers(int value) { + bitField0_ |= 0x00000001; + numRegionServers_ = value; + onChanged(); + return this; } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> - getMetaLocationsFieldBuilder() { - if (metaLocationsBuilder_ == null) { - metaLocationsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder>( - metaLocations_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - metaLocations_ = null; - } - return metaLocationsBuilder_; + /** + * required int32 num_region_servers = 1; + */ + public Builder clearNumRegionServers() { + bitField0_ = (bitField0_ & ~0x00000001); + numRegionServers_ = 0; + onChanged(); + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetMetaRegionLocationsResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetNumLiveRSResponse) } static { - defaultInstance = new GetMetaRegionLocationsResponse(true); + defaultInstance = new GetNumLiveRSResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetMetaRegionLocationsResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetNumLiveRSResponse) } - public interface GetNumLiveRSRequestOrBuilder + public interface GetTableStateRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // required string table_name = 1; + /** + * required string table_name = 1; + */ + boolean hasTableName(); + /** + * required string table_name = 1; + */ + java.lang.String getTableName(); + /** + * required string table_name = 1; + */ + com.google.protobuf.ByteString + getTableNameBytes(); + + // required bool is_enabled = 2; + /** + * required bool is_enabled = 2; + */ + boolean hasIsEnabled(); + /** + * required bool is_enabled = 2; + */ + boolean getIsEnabled(); } /** - * Protobuf type {@code hbase.pb.GetNumLiveRSRequest} + * Protobuf type {@code hbase.pb.GetTableStateRequest} * *
-   ** Request and response to get the number of live region servers 
+   ** Request to check the state of a given table 
    * 
*/ - public static final class GetNumLiveRSRequest extends + public static final class GetTableStateRequest extends com.google.protobuf.GeneratedMessage - implements GetNumLiveRSRequestOrBuilder { - // Use GetNumLiveRSRequest.newBuilder() to construct. - private GetNumLiveRSRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetTableStateRequestOrBuilder { + // Use GetTableStateRequest.newBuilder() to construct. + private GetTableStateRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetNumLiveRSRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetTableStateRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetNumLiveRSRequest defaultInstance; - public static GetNumLiveRSRequest getDefaultInstance() { + private static final GetTableStateRequest defaultInstance; + public static GetTableStateRequest getDefaultInstance() { return defaultInstance; } - public GetNumLiveRSRequest getDefaultInstanceForType() { + public GetTableStateRequest getDefaultInstanceForType() { return defaultInstance; } @@ -70853,11 +70544,12 @@ public GetNumLiveRSRequest getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetNumLiveRSRequest( + private GetTableStateRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -70875,6 +70567,16 @@ private GetNumLiveRSRequest( } break; } + case 10: { + bitField0_ |= 0x00000001; + tableName_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + isEnabled_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -70889,38 +70591,108 @@ private GetNumLiveRSRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetNumLiveRSRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTableStateRequest parsePartialFrom( 
com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetNumLiveRSRequest(input, extensionRegistry); + return new GetTableStateRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } + private int bitField0_; + // required string table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private java.lang.Object tableName_; + /** + * required string table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string table_name = 1; + */ + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + tableName_ = s; + } + return s; + } + } + /** + * required string table_name = 1; + */ + public com.google.protobuf.ByteString + getTableNameBytes() { + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required bool is_enabled = 2; + public static final int IS_ENABLED_FIELD_NUMBER = 2; + private boolean isEnabled_; + /** + * required bool is_enabled = 2; + */ + public boolean hasIsEnabled() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bool is_enabled = 2; + */ + public boolean getIsEnabled() { + return isEnabled_; + } + private void initFields() { + tableName_ = ""; + isEnabled_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasIsEnabled()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -70928,6 +70700,12 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTableNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, isEnabled_); + } getUnknownFields().writeTo(output); } @@ -70937,6 +70715,14 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTableNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, isEnabled_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -70954,12 +70740,22 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)) { return 
super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) obj; boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasIsEnabled() == other.hasIsEnabled()); + if (hasIsEnabled()) { + result = result && (getIsEnabled() + == other.getIsEnabled()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -70973,58 +70769,66 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasIsEnabled()) { + hash = (37 * hash) + IS_ENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsEnabled()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest 
parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -71033,7 +70837,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLive public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -71045,28 +70849,28 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetNumLiveRSRequest} + * Protobuf type {@code hbase.pb.GetTableStateRequest} * *
-     ** Request and response to get the number of live region servers 
+     ** Request to check the state of a given table 
      * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -71086,6 +70890,10 @@ private static Builder create() { public Builder clear() { super.clear(); + tableName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + isEnabled_ = false; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -71095,43 +70903,70 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest(this); + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.tableName_ = tableName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.isEnabled_ = isEnabled_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + bitField0_ |= 0x00000001; + tableName_ = other.tableName_; + onChanged(); + } + if (other.hasIsEnabled()) { + setIsEnabled(other.getIsEnabled()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!hasIsEnabled()) { + + return false; + } return true; } @@ -71139,11 +70974,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -71152,50 +70987,158 @@ public Builder mergeFrom( } return this; } + private int bitField0_; - // @@protoc_insertion_point(builder_scope:hbase.pb.GetNumLiveRSRequest) + // required string table_name = 1; + private java.lang.Object tableName_ = ""; + /** + * required string table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string table_name = 1; + */ + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tableName_ = s; + return s; + } else { + 
return (java.lang.String) ref; + } + } + /** + * required string table_name = 1; + */ + public com.google.protobuf.ByteString + getTableNameBytes() { + java.lang.Object ref = tableName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string table_name = 1; + */ + public Builder setTableName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tableName_ = value; + onChanged(); + return this; + } + /** + * required string table_name = 1; + */ + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + /** + * required string table_name = 1; + */ + public Builder setTableNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tableName_ = value; + onChanged(); + return this; + } + + // required bool is_enabled = 2; + private boolean isEnabled_ ; + /** + * required bool is_enabled = 2; + */ + public boolean hasIsEnabled() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bool is_enabled = 2; + */ + public boolean getIsEnabled() { + return isEnabled_; + } + /** + * required bool is_enabled = 2; + */ + public Builder setIsEnabled(boolean value) { + bitField0_ |= 0x00000002; + isEnabled_ = value; + onChanged(); + return this; + } + /** + * required bool is_enabled = 2; + */ + public Builder clearIsEnabled() { + bitField0_ = (bitField0_ & ~0x00000002); + isEnabled_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetTableStateRequest) } static { - defaultInstance = new GetNumLiveRSRequest(true); + defaultInstance = new GetTableStateRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetNumLiveRSRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.GetTableStateRequest) } - public interface GetNumLiveRSResponseOrBuilder + public interface GetTableStateResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required int32 num_region_servers = 1; + // required bool enabled_or_disabled = 1; /** - * required int32 num_region_servers = 1; + * required bool enabled_or_disabled = 1; */ - boolean hasNumRegionServers(); + boolean hasEnabledOrDisabled(); /** - * required int32 num_region_servers = 1; + * required bool enabled_or_disabled = 1; */ - int getNumRegionServers(); + boolean getEnabledOrDisabled(); } /** - * Protobuf type {@code hbase.pb.GetNumLiveRSResponse} + * Protobuf type {@code hbase.pb.GetTableStateResponse} */ - public static final class GetNumLiveRSResponse extends + public static final class GetTableStateResponse extends com.google.protobuf.GeneratedMessage - implements GetNumLiveRSResponseOrBuilder { - // Use GetNumLiveRSResponse.newBuilder() to construct. - private GetNumLiveRSResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements GetTableStateResponseOrBuilder { + // Use GetTableStateResponse.newBuilder() to construct. 
+ private GetTableStateResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private GetNumLiveRSResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private GetTableStateResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final GetNumLiveRSResponse defaultInstance; - public static GetNumLiveRSResponse getDefaultInstance() { + private static final GetTableStateResponse defaultInstance; + public static GetTableStateResponse getDefaultInstance() { return defaultInstance; } - public GetNumLiveRSResponse getDefaultInstanceForType() { + public GetTableStateResponse getDefaultInstanceForType() { return defaultInstance; } @@ -71205,7 +71148,7 @@ public GetNumLiveRSResponse getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private GetNumLiveRSResponse( + private GetTableStateResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -71230,7 +71173,7 @@ private GetNumLiveRSResponse( } case 8: { bitField0_ |= 0x00000001; - numRegionServers_ = input.readInt32(); + enabledOrDisabled_ = input.readBool(); break; } } @@ -71247,57 +71190,57 @@ private GetNumLiveRSResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public GetNumLiveRSResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTableStateResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetNumLiveRSResponse(input, extensionRegistry); + return new GetTableStateResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required int32 num_region_servers = 1; - public static final int NUM_REGION_SERVERS_FIELD_NUMBER = 1; - private int numRegionServers_; + // required bool enabled_or_disabled = 1; + public static final int ENABLED_OR_DISABLED_FIELD_NUMBER = 1; + private boolean 
enabledOrDisabled_; /** - * required int32 num_region_servers = 1; + * required bool enabled_or_disabled = 1; */ - public boolean hasNumRegionServers() { + public boolean hasEnabledOrDisabled() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required int32 num_region_servers = 1; + * required bool enabled_or_disabled = 1; */ - public int getNumRegionServers() { - return numRegionServers_; + public boolean getEnabledOrDisabled() { + return enabledOrDisabled_; } private void initFields() { - numRegionServers_ = 0; + enabledOrDisabled_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasNumRegionServers()) { + if (!hasEnabledOrDisabled()) { memoizedIsInitialized = 0; return false; } @@ -71309,7 +71252,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt32(1, numRegionServers_); + output.writeBool(1, enabledOrDisabled_); } getUnknownFields().writeTo(output); } @@ -71322,7 +71265,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeInt32Size(1, numRegionServers_); + .computeBoolSize(1, enabledOrDisabled_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -71341,16 +71284,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) obj; boolean result = true; - result = result && (hasNumRegionServers() == other.hasNumRegionServers()); - if (hasNumRegionServers()) { - result = result && (getNumRegionServers() - == other.getNumRegionServers()); + result = result && (hasEnabledOrDisabled() == other.hasEnabledOrDisabled()); + if (hasEnabledOrDisabled()) { + result = result && (getEnabledOrDisabled() + == other.getEnabledOrDisabled()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -71365,62 +71308,62 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasNumRegionServers()) { - hash = (37 * hash) + NUM_REGION_SERVERS_FIELD_NUMBER; - hash = (53 * hash) + getNumRegionServers(); + if (hasEnabledOrDisabled()) { + hash = (37 * hash) + ENABLED_OR_DISABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getEnabledOrDisabled()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -71429,7 +71372,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLive public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -71441,24 +71384,24 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.GetNumLiveRSResponse} + * Protobuf type {@code hbase.pb.GetTableStateResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -71478,7 +71421,7 @@ private static Builder create() { public Builder clear() { super.clear(); - numRegionServers_ = 0; + enabledOrDisabled_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -71489,54 +71432,54 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse build() { + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.numRegionServers_ = numRegionServers_; + result.enabledOrDisabled_ = enabledOrDisabled_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance()) return this; - if (other.hasNumRegionServers()) { - setNumRegionServers(other.getNumRegionServers()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()) return this; + if (other.hasEnabledOrDisabled()) { + setEnabledOrDisabled(other.getEnabledOrDisabled()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasNumRegionServers()) { + if (!hasEnabledOrDisabled()) { return false; } @@ -71547,11 +71490,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -71562,48 +71505,48 @@ public Builder mergeFrom( } private int bitField0_; - // required int32 num_region_servers = 1; - private int numRegionServers_ ; + // required bool enabled_or_disabled = 1; + private boolean enabledOrDisabled_ ; /** - * required int32 num_region_servers = 
1; + * required bool enabled_or_disabled = 1; */ - public boolean hasNumRegionServers() { + public boolean hasEnabledOrDisabled() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required int32 num_region_servers = 1; + * required bool enabled_or_disabled = 1; */ - public int getNumRegionServers() { - return numRegionServers_; + public boolean getEnabledOrDisabled() { + return enabledOrDisabled_; } /** - * required int32 num_region_servers = 1; + * required bool enabled_or_disabled = 1; */ - public Builder setNumRegionServers(int value) { + public Builder setEnabledOrDisabled(boolean value) { bitField0_ |= 0x00000001; - numRegionServers_ = value; + enabledOrDisabled_ = value; onChanged(); return this; } /** - * required int32 num_region_servers = 1; + * required bool enabled_or_disabled = 1; */ - public Builder clearNumRegionServers() { + public Builder clearEnabledOrDisabled() { bitField0_ = (bitField0_ & ~0x00000001); - numRegionServers_ = 0; + enabledOrDisabled_ = false; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.GetNumLiveRSResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.GetTableStateResponse) } static { - defaultInstance = new GetNumLiveRSResponse(true); + defaultInstance = new GetTableStateResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.GetNumLiveRSResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.GetTableStateResponse) } /** @@ -72421,20 +72364,8 @@ public abstract void isSnapshotCleanupEnabled( */ public abstract void getLogEntries( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogRequest request, - com.google.protobuf.RpcCallback done); - - /** - * rpc GetTableState(.hbase.pb.GetTableStateRequest) returns (.hbase.pb.GetTableStateResponse); - * - *
-       * <pre>
-       ** returns table state
-       * </pre>
- */ - public abstract void getTableState( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, - com.google.protobuf.RpcCallback done); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogRequest request, + com.google.protobuf.RpcCallback done); } @@ -72961,14 +72892,6 @@ public void getLogEntries( impl.getLogEntries(controller, request, done); } - @java.lang.Override - public void getTableState( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, - com.google.protobuf.RpcCallback done) { - impl.getTableState(controller, request, done); - } - }; } @@ -73121,8 +73044,6 @@ public final com.google.protobuf.Message callBlockingMethod( return impl.isSnapshotCleanupEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)request); case 64: return impl.getLogEntries(controller, (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogRequest)request); - case 65: - return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -73267,8 +73188,6 @@ public final com.google.protobuf.Message callBlockingMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance(); case 64: return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogRequest.getDefaultInstance(); - case 65: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -73413,8 +73332,6 @@ public final com.google.protobuf.Message callBlockingMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance(); case 64: return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogEntry.getDefaultInstance(); - case 65: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -74233,18 +74150,6 @@ public abstract void getLogEntries( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogRequest request, com.google.protobuf.RpcCallback done); - /** - * rpc GetTableState(.hbase.pb.GetTableStateRequest) returns (.hbase.pb.GetTableStateResponse); - * - *
-     * <pre>
-     ** returns table state
-     * </pre>
- */ - public abstract void getTableState( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, - com.google.protobuf.RpcCallback done); - public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -74592,11 +74497,6 @@ public final void callMethod( com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 65: - this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -74741,8 +74641,6 @@ public final void callMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance(); case 64: return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogRequest.getDefaultInstance(); - case 65: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -74887,8 +74785,6 @@ public final void callMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance(); case 64: return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogEntry.getDefaultInstance(); - case 65: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -75884,21 +75780,6 @@ public void getLogEntries( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogEntry.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogEntry.getDefaultInstance())); } - - public void getTableState( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(65), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance())); - } } public static BlockingInterface newBlockingStub( @@ -76231,11 +76112,6 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogEntry getLogEnt com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogRequest request) throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) - throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -77024,18 +76900,6 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogEntry getLogEnt org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LogEntry.getDefaultInstance()); } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( - com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(65), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); - } - } // @@protoc_insertion_point(class_scope:hbase.pb.MasterService) @@ -77107,6 +76971,19 @@ public abstract void getNumLiveRS( org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetTableState(.hbase.pb.GetTableStateRequest) returns (.hbase.pb.GetTableStateResponse); + * + *
+       * <pre>
+       **
+       * Returns the state of the table.
+       * </pre>
+ */ + public abstract void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -77144,6 +77021,14 @@ public void getNumLiveRS( impl.getNumLiveRS(controller, request, done); } + @java.lang.Override + public void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done) { + impl.getTableState(controller, request, done); + } + }; } @@ -77174,6 +77059,8 @@ public final com.google.protobuf.Message callBlockingMethod( return impl.getMetaRegionLocations(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)request); case 3: return impl.getNumLiveRS(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)request); + case 4: + return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -77196,6 +77083,8 @@ public final com.google.protobuf.Message callBlockingMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance(); case 3: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -77218,6 +77107,8 @@ public final com.google.protobuf.Message callBlockingMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance(); case 3: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -77279,6 +77170,19 @@ public abstract void getNumLiveRS( org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetTableState(.hbase.pb.GetTableStateRequest) returns (.hbase.pb.GetTableStateResponse); + * + *
+     * <pre>
+     **
+     * Returns the state of the table.
+     * </pre>
+ */ + public abstract void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -77321,6 +77225,11 @@ public final void callMethod( com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 4: + this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -77343,6 +77252,8 @@ public final void callMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance(); case 3: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -77365,6 +77276,8 @@ public final void callMethod( return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance(); case 3: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -77445,6 +77358,21 @@ public void getNumLiveRS( org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance())); } + + public void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -77472,6 +77400,11 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRespo com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -77528,6 +77461,18 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRespo org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.ClientMetaService) @@ -78013,16 +77958,6 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRespo private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_GetTableStateRequest_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_GetTableStateResponse_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_GetClusterStatusRequest_descriptor; private static @@ -78213,6 +78148,16 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRespo private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetTableStateRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetTableStateResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -78375,241 +78320,240 @@ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRespo "\t\022!\n\022include_sys_tables\030\002 \001(\010:\005false\022\021\n\t" + "namespace\030\003 \001(\t\"A\n\025GetTableNamesResponse" + "\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.TableNa" + - "me\"?\n\024GetTableStateRequest\022\'\n\ntable_name" + - "\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025GetTableS" + - "tateResponse\022)\n\013table_state\030\001 \002(\0132\024.hbas" + - "e.pb.TableState\"\031\n\027GetClusterStatusReque" + - "st\"K\n\030GetClusterStatusResponse\022/\n\016cluste" + - "r_status\030\001 \002(\0132\027.hbase.pb.ClusterStatus\"" + - "\030\n\026IsMasterRunningRequest\"4\n\027IsMasterRun", - "ningResponse\022\031\n\021is_master_running\030\001 \002(\010\"" + - "I\n\024ExecProcedureRequest\0221\n\tprocedure\030\001 \002" + - "(\0132\036.hbase.pb.ProcedureDescription\"F\n\025Ex" + - "ecProcedureResponse\022\030\n\020expected_timeout\030" + - "\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsProcedur" + - "eDoneRequest\0221\n\tprocedure\030\001 \001(\0132\036.hbase." 
+ - "pb.ProcedureDescription\"`\n\027IsProcedureDo" + - "neResponse\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snaps" + - "hot\030\002 \001(\0132\036.hbase.pb.ProcedureDescriptio" + - "n\",\n\031GetProcedureResultRequest\022\017\n\007proc_i", - "d\030\001 \002(\004\"\371\001\n\032GetProcedureResultResponse\0229" + - "\n\005state\030\001 \002(\0162*.hbase.pb.GetProcedureRes" + - "ultResponse.State\022\022\n\nstart_time\030\002 \001(\004\022\023\n" + - "\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\tex" + - "ception\030\005 \001(\0132!.hbase.pb.ForeignExceptio" + - "nMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNN" + - "ING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProcedureReq" + - "uest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInterruptIfR" + - "unning\030\002 \001(\010:\004true\"6\n\026AbortProcedureResp" + - "onse\022\034\n\024is_procedure_aborted\030\001 \002(\010\"\027\n\025Li", - "stProceduresRequest\"@\n\026ListProceduresRes" + - "ponse\022&\n\tprocedure\030\001 \003(\0132\023.hbase.pb.Proc" + - "edure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_name\030\001" + - " \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamespace\030\003 " + - "\001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb.Table" + - "Name\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_globa" + - "ls\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.Th" + - "rottleRequest\"\022\n\020SetQuotaResponse\"J\n\037Maj" + - "orCompactionTimestampRequest\022\'\n\ntable_na" + - "me\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(MajorCo", - "mpactionTimestampForRegionRequest\022)\n\006reg" + - "ion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"@\n " + - "MajorCompactionTimestampResponse\022\034\n\024comp" + - "action_timestamp\030\001 \002(\003\"\035\n\033SecurityCapabi" + - "litiesRequest\"\354\001\n\034SecurityCapabilitiesRe" + - "sponse\022G\n\014capabilities\030\001 \003(\01621.hbase.pb." + - "SecurityCapabilitiesResponse.Capability\"" + - "\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTICATION\020" + - "\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTHORIZ" + - "ATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_", - "VISIBILITY\020\004\"D\n\027ClearDeadServersRequest\022" + - ")\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerNa" + - "me\"E\n\030ClearDeadServersResponse\022)\n\013server" + - "_name\030\001 \003(\0132\024.hbase.pb.ServerName\"A\n\031Set" + - "SnapshotCleanupRequest\022\017\n\007enabled\030\001 \002(\010\022" + - "\023\n\013synchronous\030\002 \001(\010\";\n\032SetSnapshotClean" + - "upResponse\022\035\n\025prev_snapshot_cleanup\030\001 \002(" + - "\010\"!\n\037IsSnapshotCleanupEnabledRequest\"3\n " + - "IsSnapshotCleanupEnabledResponse\022\017\n\007enab" + - "led\030\001 \002(\010\")\n\030BalancerDecisionsRequest\022\r\n", - "\005limit\030\001 \001(\r\"R\n\031BalancerDecisionsRespons" + - "e\0225\n\021balancer_decision\030\001 \003(\0132\032.hbase.pb." 
+ - "BalancerDecision\"\025\n\023GetClusterIdRequest\"" + - "*\n\024GetClusterIdResponse\022\022\n\ncluster_id\030\001 " + - "\001(\t\"\023\n\021GetMastersRequest\"W\n\027GetMastersRe" + - "sponseEntry\022)\n\013server_name\030\001 \002(\0132\024.hbase" + - ".pb.ServerName\022\021\n\tis_active\030\002 \002(\010\"O\n\022Get" + - "MastersResponse\0229\n\016master_servers\030\001 \003(\0132" + - "!.hbase.pb.GetMastersResponseEntry\"\037\n\035Ge" + - "tMetaRegionLocationsRequest\"R\n\036GetMetaRe", - "gionLocationsResponse\0220\n\016meta_locations\030" + - "\001 \003(\0132\030.hbase.pb.RegionLocation\"\025\n\023GetNu" + - "mLiveRSRequest\"2\n\024GetNumLiveRSResponse\022\032" + - "\n\022num_region_servers\030\001 \002(\005*(\n\020MasterSwit" + - "chType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\256/\n\rMasterS" + - "ervice\022e\n\024GetSchemaAlterStatus\022%.hbase.p" + - "b.GetSchemaAlterStatusRequest\032&.hbase.pb" + - ".GetSchemaAlterStatusResponse\022b\n\023GetTabl" + - "eDescriptors\022$.hbase.pb.GetTableDescript" + - "orsRequest\032%.hbase.pb.GetTableDescriptor", - "sResponse\022P\n\rGetTableNames\022\036.hbase.pb.Ge" + - "tTableNamesRequest\032\037.hbase.pb.GetTableNa" + - "mesResponse\022Y\n\020GetClusterStatus\022!.hbase." + - "pb.GetClusterStatusRequest\032\".hbase.pb.Ge" + - "tClusterStatusResponse\022V\n\017IsMasterRunnin" + - "g\022 .hbase.pb.IsMasterRunningRequest\032!.hb" + - "ase.pb.IsMasterRunningResponse\022D\n\tAddCol" + - "umn\022\032.hbase.pb.AddColumnRequest\032\033.hbase." + - "pb.AddColumnResponse\022M\n\014DeleteColumn\022\035.h" + - "base.pb.DeleteColumnRequest\032\036.hbase.pb.D", - "eleteColumnResponse\022M\n\014ModifyColumn\022\035.hb" + - "ase.pb.ModifyColumnRequest\032\036.hbase.pb.Mo" + - "difyColumnResponse\022G\n\nMoveRegion\022\033.hbase" + - ".pb.MoveRegionRequest\032\034.hbase.pb.MoveReg" + - "ionResponse\022k\n\026DispatchMergingRegions\022\'." 
+ - "hbase.pb.DispatchMergingRegionsRequest\032(" + - ".hbase.pb.DispatchMergingRegionsResponse" + - "\022M\n\014AssignRegion\022\035.hbase.pb.AssignRegion" + - "Request\032\036.hbase.pb.AssignRegionResponse\022" + - "S\n\016UnassignRegion\022\037.hbase.pb.UnassignReg", - "ionRequest\032 .hbase.pb.UnassignRegionResp" + - "onse\022P\n\rOfflineRegion\022\036.hbase.pb.Offline" + - "RegionRequest\032\037.hbase.pb.OfflineRegionRe" + - "sponse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteT" + - "ableRequest\032\035.hbase.pb.DeleteTableRespon" + - "se\022P\n\rtruncateTable\022\036.hbase.pb.TruncateT" + - "ableRequest\032\037.hbase.pb.TruncateTableResp" + - "onse\022J\n\013EnableTable\022\034.hbase.pb.EnableTab" + - "leRequest\032\035.hbase.pb.EnableTableResponse" + - "\022M\n\014DisableTable\022\035.hbase.pb.DisableTable", - "Request\032\036.hbase.pb.DisableTableResponse\022" + - "J\n\013ModifyTable\022\034.hbase.pb.ModifyTableReq" + - "uest\032\035.hbase.pb.ModifyTableResponse\022J\n\013C" + - "reateTable\022\034.hbase.pb.CreateTableRequest" + - "\032\035.hbase.pb.CreateTableResponse\022A\n\010Shutd" + - "own\022\031.hbase.pb.ShutdownRequest\032\032.hbase.p" + - "b.ShutdownResponse\022G\n\nStopMaster\022\033.hbase" + - ".pb.StopMasterRequest\032\034.hbase.pb.StopMas" + - "terResponse\022h\n\031IsMasterInMaintenanceMode" + - "\022$.hbase.pb.IsInMaintenanceModeRequest\032%", - ".hbase.pb.IsInMaintenanceModeResponse\022>\n" + - "\007Balance\022\030.hbase.pb.BalanceRequest\032\031.hba" + - "se.pb.BalanceResponse\022_\n\022SetBalancerRunn" + - "ing\022#.hbase.pb.SetBalancerRunningRequest" + - "\032$.hbase.pb.SetBalancerRunningResponse\022\\" + - "\n\021IsBalancerEnabled\022\".hbase.pb.IsBalance" + - "rEnabledRequest\032#.hbase.pb.IsBalancerEna" + - "bledResponse\022k\n\026SetSplitOrMergeEnabled\022\'" + - ".hbase.pb.SetSplitOrMergeEnabledRequest\032" + - "(.hbase.pb.SetSplitOrMergeEnabledRespons", - "e\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.Is" + - "SplitOrMergeEnabledRequest\032\'.hbase.pb.Is" + - "SplitOrMergeEnabledResponse\022D\n\tNormalize" + - "\022\032.hbase.pb.NormalizeRequest\032\033.hbase.pb." + - "NormalizeResponse\022e\n\024SetNormalizerRunnin" + - "g\022%.hbase.pb.SetNormalizerRunningRequest" + - "\032&.hbase.pb.SetNormalizerRunningResponse" + - "\022b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNor" + - "malizerEnabledRequest\032%.hbase.pb.IsNorma" + - "lizerEnabledResponse\022S\n\016RunCatalogScan\022\037", - ".hbase.pb.RunCatalogScanRequest\032 .hbase." + - "pb.RunCatalogScanResponse\022e\n\024EnableCatal" + - "ogJanitor\022%.hbase.pb.EnableCatalogJanito" + - "rRequest\032&.hbase.pb.EnableCatalogJanitor" + - "Response\022n\n\027IsCatalogJanitorEnabled\022(.hb" + - "ase.pb.IsCatalogJanitorEnabledRequest\032)." 
+ - "hbase.pb.IsCatalogJanitorEnabledResponse" + - "\022V\n\017RunCleanerChore\022 .hbase.pb.RunCleane" + - "rChoreRequest\032!.hbase.pb.RunCleanerChore" + - "Response\022k\n\026SetCleanerChoreRunning\022\'.hba", - "se.pb.SetCleanerChoreRunningRequest\032(.hb" + - "ase.pb.SetCleanerChoreRunningResponse\022h\n" + - "\025IsCleanerChoreEnabled\022&.hbase.pb.IsClea" + - "nerChoreEnabledRequest\032\'.hbase.pb.IsClea" + - "nerChoreEnabledResponse\022^\n\021ExecMasterSer" + - "vice\022#.hbase.pb.CoprocessorServiceReques" + - "t\032$.hbase.pb.CoprocessorServiceResponse\022" + - "A\n\010Snapshot\022\031.hbase.pb.SnapshotRequest\032\032" + - ".hbase.pb.SnapshotResponse\022h\n\025GetComplet" + - "edSnapshots\022&.hbase.pb.GetCompletedSnaps", - "hotsRequest\032\'.hbase.pb.GetCompletedSnaps" + - "hotsResponse\022S\n\016DeleteSnapshot\022\037.hbase.p" + - "b.DeleteSnapshotRequest\032 .hbase.pb.Delet" + - "eSnapshotResponse\022S\n\016IsSnapshotDone\022\037.hb" + - "ase.pb.IsSnapshotDoneRequest\032 .hbase.pb." + - "IsSnapshotDoneResponse\022V\n\017RestoreSnapsho" + - "t\022 .hbase.pb.RestoreSnapshotRequest\032!.hb" + - "ase.pb.RestoreSnapshotResponse\022h\n\025IsRest" + - "oreSnapshotDone\022&.hbase.pb.IsRestoreSnap" + - "shotDoneRequest\032\'.hbase.pb.IsRestoreSnap", - "shotDoneResponse\022P\n\rExecProcedure\022\036.hbas" + - "e.pb.ExecProcedureRequest\032\037.hbase.pb.Exe" + - "cProcedureResponse\022W\n\024ExecProcedureWithR" + - "et\022\036.hbase.pb.ExecProcedureRequest\032\037.hba" + - "se.pb.ExecProcedureResponse\022V\n\017IsProcedu" + - "reDone\022 .hbase.pb.IsProcedureDoneRequest" + - "\032!.hbase.pb.IsProcedureDoneResponse\022V\n\017M" + - "odifyNamespace\022 .hbase.pb.ModifyNamespac" + - "eRequest\032!.hbase.pb.ModifyNamespaceRespo" + - "nse\022V\n\017CreateNamespace\022 .hbase.pb.Create", - "NamespaceRequest\032!.hbase.pb.CreateNamesp" + - "aceResponse\022V\n\017DeleteNamespace\022 .hbase.p" + - "b.DeleteNamespaceRequest\032!.hbase.pb.Dele" + - "teNamespaceResponse\022k\n\026GetNamespaceDescr" + - "iptor\022\'.hbase.pb.GetNamespaceDescriptorR" + - "equest\032(.hbase.pb.GetNamespaceDescriptor" + - "Response\022q\n\030ListNamespaceDescriptors\022).h" + - "base.pb.ListNamespaceDescriptorsRequest\032" + - "*.hbase.pb.ListNamespaceDescriptorsRespo" + - "nse\022\206\001\n\037ListTableDescriptorsByNamespace\022", - "0.hbase.pb.ListTableDescriptorsByNamespa" + - "ceRequest\0321.hbase.pb.ListTableDescriptor" + - "sByNamespaceResponse\022t\n\031ListTableNamesBy" + - "Namespace\022*.hbase.pb.ListTableNamesByNam" + - "espaceRequest\032+.hbase.pb.ListTableNamesB" + - "yNamespaceResponse\022A\n\010SetQuota\022\031.hbase.p" + - "b.SetQuotaRequest\032\032.hbase.pb.SetQuotaRes" + - "ponse\022x\n\037getLastMajorCompactionTimestamp" + - "\022).hbase.pb.MajorCompactionTimestampRequ" + - "est\032*.hbase.pb.MajorCompactionTimestampR", - "esponse\022\212\001\n(getLastMajorCompactionTimest" + - "ampForRegion\0222.hbase.pb.MajorCompactionT" + - "imestampForRegionRequest\032*.hbase.pb.Majo" + - "rCompactionTimestampResponse\022_\n\022getProce" + - "dureResult\022#.hbase.pb.GetProcedureResult" + - "Request\032$.hbase.pb.GetProcedureResultRes" + - "ponse\022h\n\027getSecurityCapabilities\022%.hbase" + - ".pb.SecurityCapabilitiesRequest\032&.hbase." 
+ - "pb.SecurityCapabilitiesResponse\022S\n\016Abort" + - "Procedure\022\037.hbase.pb.AbortProcedureReque", - "st\032 .hbase.pb.AbortProcedureResponse\022S\n\016" + - "ListProcedures\022\037.hbase.pb.ListProcedures" + - "Request\032 .hbase.pb.ListProceduresRespons" + - "e\022Y\n\020ClearDeadServers\022!.hbase.pb.ClearDe" + - "adServersRequest\032\".hbase.pb.ClearDeadSer" + - "versResponse\022S\n\016ListNamespaces\022\037.hbase.p" + - "b.ListNamespacesRequest\032 .hbase.pb.ListN" + - "amespacesResponse\022b\n\025SwitchSnapshotClean" + - "up\022#.hbase.pb.SetSnapshotCleanupRequest\032" + - "$.hbase.pb.SetSnapshotCleanupResponse\022q\n", - "\030IsSnapshotCleanupEnabled\022).hbase.pb.IsS" + - "napshotCleanupEnabledRequest\032*.hbase.pb." + - "IsSnapshotCleanupEnabledResponse\0229\n\rGetL" + - "ogEntries\022\024.hbase.pb.LogRequest\032\022.hbase." + - "pb.LogEntry\022P\n\rGetTableState\022\036.hbase.pb." + - "GetTableStateRequest\032\037.hbase.pb.GetTable" + - "StateResponse2\347\002\n\021ClientMetaService\022M\n\014G" + - "etClusterId\022\035.hbase.pb.GetClusterIdReque" + - "st\032\036.hbase.pb.GetClusterIdResponse\022G\n\nGe" + - "tMasters\022\033.hbase.pb.GetMastersRequest\032\034.", - "hbase.pb.GetMastersResponse\022k\n\026GetMetaRe" + - "gionLocations\022\'.hbase.pb.GetMetaRegionLo" + - "cationsRequest\032(.hbase.pb.GetMetaRegionL" + - "ocationsResponse\022M\n\014GetNumLiveRS\022\035.hbase" + - ".pb.GetNumLiveRSRequest\032\036.hbase.pb.GetNu" + - "mLiveRSResponseBB\n*org.apache.hadoop.hba" + - "se.protobuf.generatedB\014MasterProtosH\001\210\001\001" + - "\240\001\001" + "me\"\031\n\027GetClusterStatusRequest\"K\n\030GetClus" + + "terStatusResponse\022/\n\016cluster_status\030\001 \002(" + + "\0132\027.hbase.pb.ClusterStatus\"\030\n\026IsMasterRu" + + "nningRequest\"4\n\027IsMasterRunningResponse\022" + + "\031\n\021is_master_running\030\001 \002(\010\"I\n\024ExecProced" + + "ureRequest\0221\n\tprocedure\030\001 \002(\0132\036.hbase.pb" + + ".ProcedureDescription\"F\n\025ExecProcedureRe", + "sponse\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013retu" + + "rn_data\030\002 \001(\014\"K\n\026IsProcedureDoneRequest\022" + + "1\n\tprocedure\030\001 \001(\0132\036.hbase.pb.ProcedureD" + + "escription\"`\n\027IsProcedureDoneResponse\022\023\n" + + "\004done\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132\036.h" + + "base.pb.ProcedureDescription\",\n\031GetProce" + + "dureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032G" + + "etProcedureResultResponse\0229\n\005state\030\001 \002(\016" + + "2*.hbase.pb.GetProcedureResultResponse.S" + + "tate\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_update\030", + "\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texception\030\005 \001(\013" + + "2!.hbase.pb.ForeignExceptionMessage\"1\n\005S" + + "tate\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINI" + + "SHED\020\002\"M\n\025AbortProcedureRequest\022\017\n\007proc_" + + "id\030\001 \002(\004\022#\n\025mayInterruptIfRunning\030\002 \001(\010:" + + "\004true\"6\n\026AbortProcedureResponse\022\034\n\024is_pr" + + "ocedure_aborted\030\001 \002(\010\"\027\n\025ListProceduresR" + + "equest\"@\n\026ListProceduresResponse\022&\n\tproc" + + "edure\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001\n\017Set" + + "QuotaRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_", + "group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_" + + "name\030\004 
\001(\0132\023.hbase.pb.TableName\022\022\n\nremov" + + "e_all\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+\n\010t" + + "hrottle\030\007 \001(\0132\031.hbase.pb.ThrottleRequest" + + "\"\022\n\020SetQuotaResponse\"J\n\037MajorCompactionT" + + "imestampRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hb" + + "ase.pb.TableName\"U\n(MajorCompactionTimes" + + "tampForRegionRequest\022)\n\006region\030\001 \002(\0132\031.h" + + "base.pb.RegionSpecifier\"@\n MajorCompacti" + + "onTimestampResponse\022\034\n\024compaction_timest", + "amp\030\001 \002(\003\"\035\n\033SecurityCapabilitiesRequest" + + "\"\354\001\n\034SecurityCapabilitiesResponse\022G\n\014cap" + + "abilities\030\001 \003(\01621.hbase.pb.SecurityCapab" + + "ilitiesResponse.Capability\"\202\001\n\nCapabilit" + + "y\022\031\n\025SIMPLE_AUTHENTICATION\020\000\022\031\n\025SECURE_A" + + "UTHENTICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CE" + + "LL_AUTHORIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004\"" + + "D\n\027ClearDeadServersRequest\022)\n\013server_nam" + + "e\030\001 \003(\0132\024.hbase.pb.ServerName\"E\n\030ClearDe" + + "adServersResponse\022)\n\013server_name\030\001 \003(\0132\024", + ".hbase.pb.ServerName\"A\n\031SetSnapshotClean" + + "upRequest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronou" + + "s\030\002 \001(\010\";\n\032SetSnapshotCleanupResponse\022\035\n" + + "\025prev_snapshot_cleanup\030\001 \002(\010\"!\n\037IsSnapsh" + + "otCleanupEnabledRequest\"3\n IsSnapshotCle" + + "anupEnabledResponse\022\017\n\007enabled\030\001 \002(\010\")\n\030" + + "BalancerDecisionsRequest\022\r\n\005limit\030\001 \001(\r\"" + + "R\n\031BalancerDecisionsResponse\0225\n\021balancer" + + "_decision\030\001 \003(\0132\032.hbase.pb.BalancerDecis" + + "ion\"\025\n\023GetClusterIdRequest\"*\n\024GetCluster", + "IdResponse\022\022\n\ncluster_id\030\001 \001(\t\"\023\n\021GetMas" + + "tersRequest\"W\n\027GetMastersResponseEntry\022)" + + "\n\013server_name\030\001 \002(\0132\024.hbase.pb.ServerNam" + + "e\022\021\n\tis_active\030\002 \002(\010\"O\n\022GetMastersRespon" + + "se\0229\n\016master_servers\030\001 \003(\0132!.hbase.pb.Ge" + + "tMastersResponseEntry\"\037\n\035GetMetaRegionLo" + + "cationsRequest\"R\n\036GetMetaRegionLocations" + + "Response\0220\n\016meta_locations\030\001 \003(\0132\030.hbase" + + ".pb.RegionLocation\"\025\n\023GetNumLiveRSReques" + + "t\"2\n\024GetNumLiveRSResponse\022\032\n\022num_region_", + "servers\030\001 \002(\005\">\n\024GetTableStateRequest\022\022\n" + + "\ntable_name\030\001 \002(\t\022\022\n\nis_enabled\030\002 \002(\010\"4\n" + + "\025GetTableStateResponse\022\033\n\023enabled_or_dis" + + "abled\030\001 \002(\010*(\n\020MasterSwitchType\022\t\n\005SPLIT" + + "\020\000\022\t\n\005MERGE\020\0012\334.\n\rMasterService\022e\n\024GetSc" + + "hemaAlterStatus\022%.hbase.pb.GetSchemaAlte" + + "rStatusRequest\032&.hbase.pb.GetSchemaAlter" + + "StatusResponse\022b\n\023GetTableDescriptors\022$." + + "hbase.pb.GetTableDescriptorsRequest\032%.hb" + + "ase.pb.GetTableDescriptorsResponse\022P\n\rGe", + "tTableNames\022\036.hbase.pb.GetTableNamesRequ" + + "est\032\037.hbase.pb.GetTableNamesResponse\022Y\n\020" + + "GetClusterStatus\022!.hbase.pb.GetClusterSt" + + "atusRequest\032\".hbase.pb.GetClusterStatusR" + + "esponse\022V\n\017IsMasterRunning\022 .hbase.pb.Is" + + "MasterRunningRequest\032!.hbase.pb.IsMaster" + + "RunningResponse\022D\n\tAddColumn\022\032.hbase.pb." 
+ + "AddColumnRequest\032\033.hbase.pb.AddColumnRes" + + "ponse\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteC" + + "olumnRequest\032\036.hbase.pb.DeleteColumnResp", + "onse\022M\n\014ModifyColumn\022\035.hbase.pb.ModifyCo" + + "lumnRequest\032\036.hbase.pb.ModifyColumnRespo" + + "nse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegionR" + + "equest\032\034.hbase.pb.MoveRegionResponse\022k\n\026" + + "DispatchMergingRegions\022\'.hbase.pb.Dispat" + + "chMergingRegionsRequest\032(.hbase.pb.Dispa" + + "tchMergingRegionsResponse\022M\n\014AssignRegio" + + "n\022\035.hbase.pb.AssignRegionRequest\032\036.hbase" + + ".pb.AssignRegionResponse\022S\n\016UnassignRegi" + + "on\022\037.hbase.pb.UnassignRegionRequest\032 .hb", + "ase.pb.UnassignRegionResponse\022P\n\rOffline" + + "Region\022\036.hbase.pb.OfflineRegionRequest\032\037" + + ".hbase.pb.OfflineRegionResponse\022J\n\013Delet" + + "eTable\022\034.hbase.pb.DeleteTableRequest\032\035.h" + + "base.pb.DeleteTableResponse\022P\n\rtruncateT" + + "able\022\036.hbase.pb.TruncateTableRequest\032\037.h" + + "base.pb.TruncateTableResponse\022J\n\013EnableT" + + "able\022\034.hbase.pb.EnableTableRequest\032\035.hba" + + "se.pb.EnableTableResponse\022M\n\014DisableTabl" + + "e\022\035.hbase.pb.DisableTableRequest\032\036.hbase", + ".pb.DisableTableResponse\022J\n\013ModifyTable\022" + + "\034.hbase.pb.ModifyTableRequest\032\035.hbase.pb" + + ".ModifyTableResponse\022J\n\013CreateTable\022\034.hb" + + "ase.pb.CreateTableRequest\032\035.hbase.pb.Cre" + + "ateTableResponse\022A\n\010Shutdown\022\031.hbase.pb." + + "ShutdownRequest\032\032.hbase.pb.ShutdownRespo" + + "nse\022G\n\nStopMaster\022\033.hbase.pb.StopMasterR" + + "equest\032\034.hbase.pb.StopMasterResponse\022h\n\031" + + "IsMasterInMaintenanceMode\022$.hbase.pb.IsI" + + "nMaintenanceModeRequest\032%.hbase.pb.IsInM", + "aintenanceModeResponse\022>\n\007Balance\022\030.hbas" + + "e.pb.BalanceRequest\032\031.hbase.pb.BalanceRe" + + "sponse\022_\n\022SetBalancerRunning\022#.hbase.pb." + + "SetBalancerRunningRequest\032$.hbase.pb.Set" + + "BalancerRunningResponse\022\\\n\021IsBalancerEna" + + "bled\022\".hbase.pb.IsBalancerEnabledRequest" + + "\032#.hbase.pb.IsBalancerEnabledResponse\022k\n" + + "\026SetSplitOrMergeEnabled\022\'.hbase.pb.SetSp" + + "litOrMergeEnabledRequest\032(.hbase.pb.SetS" + + "plitOrMergeEnabledResponse\022h\n\025IsSplitOrM", + "ergeEnabled\022&.hbase.pb.IsSplitOrMergeEna" + + "bledRequest\032\'.hbase.pb.IsSplitOrMergeEna" + + "bledResponse\022D\n\tNormalize\022\032.hbase.pb.Nor" + + "malizeRequest\032\033.hbase.pb.NormalizeRespon" + + "se\022e\n\024SetNormalizerRunning\022%.hbase.pb.Se" + + "tNormalizerRunningRequest\032&.hbase.pb.Set" + + "NormalizerRunningResponse\022b\n\023IsNormalize" + + "rEnabled\022$.hbase.pb.IsNormalizerEnabledR" + + "equest\032%.hbase.pb.IsNormalizerEnabledRes" + + "ponse\022S\n\016RunCatalogScan\022\037.hbase.pb.RunCa", + "talogScanRequest\032 .hbase.pb.RunCatalogSc" + + "anResponse\022e\n\024EnableCatalogJanitor\022%.hba" + + "se.pb.EnableCatalogJanitorRequest\032&.hbas" + + "e.pb.EnableCatalogJanitorResponse\022n\n\027IsC" + + "atalogJanitorEnabled\022(.hbase.pb.IsCatalo" + + "gJanitorEnabledRequest\032).hbase.pb.IsCata" + + "logJanitorEnabledResponse\022V\n\017RunCleanerC" + + "hore\022 .hbase.pb.RunCleanerChoreRequest\032!" 
+ + ".hbase.pb.RunCleanerChoreResponse\022k\n\026Set" + + "CleanerChoreRunning\022\'.hbase.pb.SetCleane", + "rChoreRunningRequest\032(.hbase.pb.SetClean" + + "erChoreRunningResponse\022h\n\025IsCleanerChore" + + "Enabled\022&.hbase.pb.IsCleanerChoreEnabled" + + "Request\032\'.hbase.pb.IsCleanerChoreEnabled" + + "Response\022^\n\021ExecMasterService\022#.hbase.pb" + + ".CoprocessorServiceRequest\032$.hbase.pb.Co" + + "processorServiceResponse\022A\n\010Snapshot\022\031.h" + + "base.pb.SnapshotRequest\032\032.hbase.pb.Snaps" + + "hotResponse\022h\n\025GetCompletedSnapshots\022&.h" + + "base.pb.GetCompletedSnapshotsRequest\032\'.h", + "base.pb.GetCompletedSnapshotsResponse\022S\n" + + "\016DeleteSnapshot\022\037.hbase.pb.DeleteSnapsho" + + "tRequest\032 .hbase.pb.DeleteSnapshotRespon" + + "se\022S\n\016IsSnapshotDone\022\037.hbase.pb.IsSnapsh" + + "otDoneRequest\032 .hbase.pb.IsSnapshotDoneR" + + "esponse\022V\n\017RestoreSnapshot\022 .hbase.pb.Re" + + "storeSnapshotRequest\032!.hbase.pb.RestoreS" + + "napshotResponse\022h\n\025IsRestoreSnapshotDone" + + "\022&.hbase.pb.IsRestoreSnapshotDoneRequest" + + "\032\'.hbase.pb.IsRestoreSnapshotDoneRespons", + "e\022P\n\rExecProcedure\022\036.hbase.pb.ExecProced" + + "ureRequest\032\037.hbase.pb.ExecProcedureRespo" + + "nse\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.E" + + "xecProcedureRequest\032\037.hbase.pb.ExecProce" + + "dureResponse\022V\n\017IsProcedureDone\022 .hbase." + + "pb.IsProcedureDoneRequest\032!.hbase.pb.IsP" + + "rocedureDoneResponse\022V\n\017ModifyNamespace\022" + + " .hbase.pb.ModifyNamespaceRequest\032!.hbas" + + "e.pb.ModifyNamespaceResponse\022V\n\017CreateNa" + + "mespace\022 .hbase.pb.CreateNamespaceReques", + "t\032!.hbase.pb.CreateNamespaceResponse\022V\n\017" + + "DeleteNamespace\022 .hbase.pb.DeleteNamespa" + + "ceRequest\032!.hbase.pb.DeleteNamespaceResp" + + "onse\022k\n\026GetNamespaceDescriptor\022\'.hbase.p" + + "b.GetNamespaceDescriptorRequest\032(.hbase." + + "pb.GetNamespaceDescriptorResponse\022q\n\030Lis" + + "tNamespaceDescriptors\022).hbase.pb.ListNam" + + "espaceDescriptorsRequest\032*.hbase.pb.List" + + "NamespaceDescriptorsResponse\022\206\001\n\037ListTab" + + "leDescriptorsByNamespace\0220.hbase.pb.List", + "TableDescriptorsByNamespaceRequest\0321.hba" + + "se.pb.ListTableDescriptorsByNamespaceRes" + + "ponse\022t\n\031ListTableNamesByNamespace\022*.hba" + + "se.pb.ListTableNamesByNamespaceRequest\032+" + + ".hbase.pb.ListTableNamesByNamespaceRespo" + + "nse\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaReque" + + "st\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLas" + + "tMajorCompactionTimestamp\022).hbase.pb.Maj" + + "orCompactionTimestampRequest\032*.hbase.pb." + + "MajorCompactionTimestampResponse\022\212\001\n(get", + "LastMajorCompactionTimestampForRegion\0222." 
+ + "hbase.pb.MajorCompactionTimestampForRegi" + + "onRequest\032*.hbase.pb.MajorCompactionTime" + + "stampResponse\022_\n\022getProcedureResult\022#.hb" + + "ase.pb.GetProcedureResultRequest\032$.hbase" + + ".pb.GetProcedureResultResponse\022h\n\027getSec" + + "urityCapabilities\022%.hbase.pb.SecurityCap" + + "abilitiesRequest\032&.hbase.pb.SecurityCapa" + + "bilitiesResponse\022S\n\016AbortProcedure\022\037.hba" + + "se.pb.AbortProcedureRequest\032 .hbase.pb.A", + "bortProcedureResponse\022S\n\016ListProcedures\022" + + "\037.hbase.pb.ListProceduresRequest\032 .hbase" + + ".pb.ListProceduresResponse\022Y\n\020ClearDeadS" + + "ervers\022!.hbase.pb.ClearDeadServersReques" + + "t\032\".hbase.pb.ClearDeadServersResponse\022S\n" + + "\016ListNamespaces\022\037.hbase.pb.ListNamespace" + + "sRequest\032 .hbase.pb.ListNamespacesRespon" + + "se\022b\n\025SwitchSnapshotCleanup\022#.hbase.pb.S" + + "etSnapshotCleanupRequest\032$.hbase.pb.SetS" + + "napshotCleanupResponse\022q\n\030IsSnapshotClea", + "nupEnabled\022).hbase.pb.IsSnapshotCleanupE" + + "nabledRequest\032*.hbase.pb.IsSnapshotClean" + + "upEnabledResponse\0229\n\rGetLogEntries\022\024.hba" + + "se.pb.LogRequest\032\022.hbase.pb.LogEntry2\271\003\n" + + "\021ClientMetaService\022M\n\014GetClusterId\022\035.hba" + + "se.pb.GetClusterIdRequest\032\036.hbase.pb.Get" + + "ClusterIdResponse\022G\n\nGetMasters\022\033.hbase." + + "pb.GetMastersRequest\032\034.hbase.pb.GetMaste" + + "rsResponse\022k\n\026GetMetaRegionLocations\022\'.h" + + "base.pb.GetMetaRegionLocationsRequest\032(.", + "hbase.pb.GetMetaRegionLocationsResponse\022" + + "M\n\014GetNumLiveRS\022\035.hbase.pb.GetNumLiveRSR" + + "equest\032\036.hbase.pb.GetNumLiveRSResponse\022P" + + "\n\rGetTableState\022\036.hbase.pb.GetTableState" + + "Request\032\037.hbase.pb.GetTableStateResponse" + + "BB\n*org.apache.hadoop.hbase.protobuf.gen" + + "eratedB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -79192,246 +79136,246 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableNamesResponse_descriptor, new java.lang.String[] { "TableNames", }); - internal_static_hbase_pb_GetTableStateRequest_descriptor = - getDescriptor().getMessageTypes().get(96); - internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_GetTableStateRequest_descriptor, - new java.lang.String[] { "TableName", }); - internal_static_hbase_pb_GetTableStateResponse_descriptor = - getDescriptor().getMessageTypes().get(97); - internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_GetTableStateResponse_descriptor, - new java.lang.String[] { "TableState", }); internal_static_hbase_pb_GetClusterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(98); + getDescriptor().getMessageTypes().get(96); internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetClusterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(99); + 
getDescriptor().getMessageTypes().get(97); internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusResponse_descriptor, new java.lang.String[] { "ClusterStatus", }); internal_static_hbase_pb_IsMasterRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(100); + getDescriptor().getMessageTypes().get(98); internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsMasterRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(101); + getDescriptor().getMessageTypes().get(99); internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningResponse_descriptor, new java.lang.String[] { "IsMasterRunning", }); internal_static_hbase_pb_ExecProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(102); + getDescriptor().getMessageTypes().get(100); internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_ExecProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(103); + getDescriptor().getMessageTypes().get(101); internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", "ReturnData", }); internal_static_hbase_pb_IsProcedureDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(104); + getDescriptor().getMessageTypes().get(102); internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_IsProcedureDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(105); + getDescriptor().getMessageTypes().get(103); internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_hbase_pb_GetProcedureResultRequest_descriptor = - getDescriptor().getMessageTypes().get(106); + getDescriptor().getMessageTypes().get(104); internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultRequest_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_GetProcedureResultResponse_descriptor = - getDescriptor().getMessageTypes().get(107); + getDescriptor().getMessageTypes().get(105); internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultResponse_descriptor, new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", }); internal_static_hbase_pb_AbortProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(108); + 
getDescriptor().getMessageTypes().get(106); internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureRequest_descriptor, new java.lang.String[] { "ProcId", "MayInterruptIfRunning", }); internal_static_hbase_pb_AbortProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(109); + getDescriptor().getMessageTypes().get(107); internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureResponse_descriptor, new java.lang.String[] { "IsProcedureAborted", }); internal_static_hbase_pb_ListProceduresRequest_descriptor = - getDescriptor().getMessageTypes().get(110); + getDescriptor().getMessageTypes().get(108); internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ListProceduresRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ListProceduresResponse_descriptor = - getDescriptor().getMessageTypes().get(111); + getDescriptor().getMessageTypes().get(109); internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ListProceduresResponse_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_SetQuotaRequest_descriptor = - getDescriptor().getMessageTypes().get(112); + getDescriptor().getMessageTypes().get(110); internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaRequest_descriptor, new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", }); internal_static_hbase_pb_SetQuotaResponse_descriptor = - getDescriptor().getMessageTypes().get(113); + getDescriptor().getMessageTypes().get(111); internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor = - getDescriptor().getMessageTypes().get(114); + getDescriptor().getMessageTypes().get(112); internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(115); + getDescriptor().getMessageTypes().get(113); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor = - getDescriptor().getMessageTypes().get(116); + getDescriptor().getMessageTypes().get(114); internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor, new java.lang.String[] { "CompactionTimestamp", }); 
internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor = - getDescriptor().getMessageTypes().get(117); + getDescriptor().getMessageTypes().get(115); internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor = - getDescriptor().getMessageTypes().get(118); + getDescriptor().getMessageTypes().get(116); internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, new java.lang.String[] { "Capabilities", }); internal_static_hbase_pb_ClearDeadServersRequest_descriptor = - getDescriptor().getMessageTypes().get(119); + getDescriptor().getMessageTypes().get(117); internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ClearDeadServersRequest_descriptor, new java.lang.String[] { "ServerName", }); internal_static_hbase_pb_ClearDeadServersResponse_descriptor = - getDescriptor().getMessageTypes().get(120); + getDescriptor().getMessageTypes().get(118); internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ClearDeadServersResponse_descriptor, new java.lang.String[] { "ServerName", }); internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor = - getDescriptor().getMessageTypes().get(121); + getDescriptor().getMessageTypes().get(119); internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor, new java.lang.String[] { "Enabled", "Synchronous", }); internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor = - getDescriptor().getMessageTypes().get(122); + getDescriptor().getMessageTypes().get(120); internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor, new java.lang.String[] { "PrevSnapshotCleanup", }); internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(123); + getDescriptor().getMessageTypes().get(121); internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(124); + getDescriptor().getMessageTypes().get(122); internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_BalancerDecisionsRequest_descriptor = - getDescriptor().getMessageTypes().get(125); + getDescriptor().getMessageTypes().get(123); internal_static_hbase_pb_BalancerDecisionsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BalancerDecisionsRequest_descriptor, new 
java.lang.String[] { "Limit", }); internal_static_hbase_pb_BalancerDecisionsResponse_descriptor = - getDescriptor().getMessageTypes().get(126); + getDescriptor().getMessageTypes().get(124); internal_static_hbase_pb_BalancerDecisionsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BalancerDecisionsResponse_descriptor, new java.lang.String[] { "BalancerDecision", }); internal_static_hbase_pb_GetClusterIdRequest_descriptor = - getDescriptor().getMessageTypes().get(127); + getDescriptor().getMessageTypes().get(125); internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterIdRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetClusterIdResponse_descriptor = - getDescriptor().getMessageTypes().get(128); + getDescriptor().getMessageTypes().get(126); internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterIdResponse_descriptor, new java.lang.String[] { "ClusterId", }); internal_static_hbase_pb_GetMastersRequest_descriptor = - getDescriptor().getMessageTypes().get(129); + getDescriptor().getMessageTypes().get(127); internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetMastersRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetMastersResponseEntry_descriptor = - getDescriptor().getMessageTypes().get(130); + getDescriptor().getMessageTypes().get(128); internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetMastersResponseEntry_descriptor, new java.lang.String[] { "ServerName", "IsActive", }); internal_static_hbase_pb_GetMastersResponse_descriptor = - getDescriptor().getMessageTypes().get(131); + getDescriptor().getMessageTypes().get(129); internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetMastersResponse_descriptor, new java.lang.String[] { "MasterServers", }); internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor = - getDescriptor().getMessageTypes().get(132); + getDescriptor().getMessageTypes().get(130); internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor = - getDescriptor().getMessageTypes().get(133); + getDescriptor().getMessageTypes().get(131); internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor, new java.lang.String[] { "MetaLocations", }); internal_static_hbase_pb_GetNumLiveRSRequest_descriptor = - getDescriptor().getMessageTypes().get(134); + getDescriptor().getMessageTypes().get(132); internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetNumLiveRSRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetNumLiveRSResponse_descriptor = - 
getDescriptor().getMessageTypes().get(135); + getDescriptor().getMessageTypes().get(133); internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetNumLiveRSResponse_descriptor, new java.lang.String[] { "NumRegionServers", }); + internal_static_hbase_pb_GetTableStateRequest_descriptor = + getDescriptor().getMessageTypes().get(134); + internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetTableStateRequest_descriptor, + new java.lang.String[] { "TableName", "IsEnabled", }); + internal_static_hbase_pb_GetTableStateResponse_descriptor = + getDescriptor().getMessageTypes().get(135); + internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetTableStateResponse_descriptor, + new java.lang.String[] { "EnabledOrDisabled", }); return null; } }; diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index e872f4c2a7bd..fc181a8bc18a 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -4419,12 +4419,12 @@ public Builder clearMode() { // @@protoc_insertion_point(class_scope:hbase.pb.SplitLogTask) } - public interface DeprecatedTableStateOrBuilder + public interface TableOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; + // required .hbase.pb.Table.State state = 1 [default = ENABLED]; /** - * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; + * required .hbase.pb.Table.State state = 1 [default = ENABLED]; * *
      * This is the table's state.  If no znode for a table,
@@ -4434,7 +4434,7 @@ public interface DeprecatedTableStateOrBuilder
      */
     boolean hasState();
     /**
-     * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
+     * required .hbase.pb.Table.State state = 1 [default = ENABLED];
      *
      * 
      * This is the table's state.  If no znode for a table,
@@ -4442,33 +4442,32 @@ public interface DeprecatedTableStateOrBuilder
      * for more.
      * 
*/ - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState(); } /** - * Protobuf type {@code hbase.pb.DeprecatedTableState} + * Protobuf type {@code hbase.pb.Table} * *
    **
    * The znode that holds state of table.
-   * Deprected, table state is stored in table descriptor on HDFS.
    * 
*/ - public static final class DeprecatedTableState extends + public static final class Table extends com.google.protobuf.GeneratedMessage - implements DeprecatedTableStateOrBuilder { - // Use DeprecatedTableState.newBuilder() to construct. - private DeprecatedTableState(com.google.protobuf.GeneratedMessage.Builder builder) { + implements TableOrBuilder { + // Use Table.newBuilder() to construct. + private Table(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private DeprecatedTableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private Table(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final DeprecatedTableState defaultInstance; - public static DeprecatedTableState getDefaultInstance() { + private static final Table defaultInstance; + public static Table getDefaultInstance() { return defaultInstance; } - public DeprecatedTableState getDefaultInstanceForType() { + public Table getDefaultInstanceForType() { return defaultInstance; } @@ -4478,7 +4477,7 @@ public DeprecatedTableState getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private DeprecatedTableState( + private Table( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -4503,7 +4502,7 @@ private DeprecatedTableState( } case 8: { int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.valueOf(rawValue); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { @@ -4526,33 +4525,33 @@ private DeprecatedTableState( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public DeprecatedTableState parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser
<Table>() { + public Table parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new DeprecatedTableState(input, extensionRegistry); + return new Table(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser<DeprecatedTableState> getParserForType() { + public com.google.protobuf.Parser<Table> getParserForType() { return PARSER; } /** - * Protobuf enum {@code hbase.pb.DeprecatedTableState.State} + * Protobuf enum {@code hbase.pb.Table.State} * *
      * Table's current state
@@ -4630,7 +4629,7 @@ public State findValueByNumber(int number) {
       }
       public static final com.google.protobuf.Descriptors.EnumDescriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDescriptor().getEnumTypes().get(0);
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDescriptor().getEnumTypes().get(0);
       }
 
       private static final State[] VALUES = values();
@@ -4652,15 +4651,15 @@ private State(int index, int value) {
         this.value = value;
       }
 
-      // @@protoc_insertion_point(enum_scope:hbase.pb.DeprecatedTableState.State)
+      // @@protoc_insertion_point(enum_scope:hbase.pb.Table.State)
     }
 
     private int bitField0_;
-    // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
+    // required .hbase.pb.Table.State state = 1 [default = ENABLED];
     public static final int STATE_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_;
+    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_;
     /**
-     * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
+     * required .hbase.pb.Table.State state = 1 [default = ENABLED];
      *
      * 
      * This is the table's state.  If no znode for a table,
@@ -4672,7 +4671,7 @@ public boolean hasState() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
+     * required .hbase.pb.Table.State state = 1 [default = ENABLED];
      *
      * 
      * This is the table's state.  If no znode for a table,
@@ -4680,12 +4679,12 @@ public boolean hasState() {
      * for more.
      * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() { + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { return state_; } private void initFields() { - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED; + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4736,10 +4735,10 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) obj; + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) obj; boolean result = true; result = result && (hasState() == other.hasState()); @@ -4769,53 +4768,53 @@ public int hashCode() { return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState 
parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -4824,7 +4823,7 @@ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Depreca public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -4836,30 +4835,29 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code hbase.pb.DeprecatedTableState} + * Protobuf type {@code hbase.pb.Table} * *
      **
      * The znode that holds state of table.
-     * Deprected, table state is stored in table descriptor on HDFS.
      * 
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableStateOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -4879,7 +4877,7 @@ private static Builder create() { public Builder clear() { super.clear(); - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED; + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -4890,23 +4888,23 @@ public Builder clone() { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState(this); + public 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -4919,16 +4917,16 @@ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTabl } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance()) return this; if (other.hasState()) { setState(other.getState()); } @@ -4948,11 +4946,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -4963,10 +4961,10 @@ public Builder mergeFrom( } private int bitField0_; - // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED; + // required .hbase.pb.Table.State state = 1 [default = ENABLED]; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; /** - * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; + * required .hbase.pb.Table.State state = 1 [default = ENABLED]; * *
        * This is the table's state.  If no znode for a table,
@@ -4978,7 +4976,7 @@ public boolean hasState() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
+       * required .hbase.pb.Table.State state = 1 [default = ENABLED];
        *
        * 
        * This is the table's state.  If no znode for a table,
@@ -4986,11 +4984,11 @@ public boolean hasState() {
        * for more.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() { + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { return state_; } /** - * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; + * required .hbase.pb.Table.State state = 1 [default = ENABLED]; * *
        * This is the table's state.  If no znode for a table,
@@ -4998,7 +4996,7 @@ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTabl
        * for more.
        * 
*/ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value) { + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value) { if (value == null) { throw new NullPointerException(); } @@ -5008,7 +5006,7 @@ public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProt return this; } /** - * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED]; + * required .hbase.pb.Table.State state = 1 [default = ENABLED]; * *
        * This is the table's state.  If no znode for a table,
@@ -5018,20 +5016,20 @@ public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProt
        */
       public Builder clearState() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
+        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
         onChanged();
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:hbase.pb.DeprecatedTableState)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.Table)
     }
 
     static {
-      defaultInstance = new DeprecatedTableState(true);
+      defaultInstance = new Table(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.DeprecatedTableState)
+    // @@protoc_insertion_point(class_scope:hbase.pb.Table)
   }
 
   public interface TableCFOrBuilder
@@ -10936,10 +10934,10 @@ public Builder clearEnabled() {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_SplitLogTask_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_DeprecatedTableState_descriptor;
+    internal_static_hbase_pb_Table_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable;
+      internal_static_hbase_pb_Table_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_TableCF_descriptor;
   private static
@@ -11003,29 +11001,28 @@ public Builder clearEnabled() {
       "\022\016\n\nUNASSIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020" +
       "\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007" +
       "UNKNOWN\020\000\022\021\n\rLOG_SPLITTING\020\001\022\016\n\nLOG_REPL" +
-      "AY\020\002\"\225\001\n\024DeprecatedTableState\022<\n\005state\030\001",
-      " \002(\0162$.hbase.pb.DeprecatedTableState.Sta" +
-      "te:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DIS" +
-      "ABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"D\n\007" +
-      "TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.T" +
-      "ableName\022\020\n\010families\030\002 \003(\014\"\330\001\n\017Replicati" +
-      "onPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027replicatio" +
-      "nEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003(\0132\030.hbas" +
-      "e.pb.BytesBytesPair\022/\n\rconfiguration\030\004 \003" +
-      "(\0132\030.hbase.pb.NameStringPair\022$\n\ttable_cf" +
-      "s\030\005 \003(\0132\021.hbase.pb.TableCF\022\021\n\tbandwidth\030",
-      "\006 \001(\003\"g\n\020ReplicationState\022/\n\005state\030\001 \002(\016" +
-      "2 .hbase.pb.ReplicationState.State\"\"\n\005St" +
-      "ate\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027Replic" +
-      "ationHLogPosition\022\020\n\010position\030\001 \002(\003\"%\n\017R" +
-      "eplicationLock\022\022\n\nlock_owner\030\001 \002(\t\"\252\001\n\tT" +
-      "ableLock\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb." +
-      "TableName\022(\n\nlock_owner\030\002 \001(\0132\024.hbase.pb" +
-      ".ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_sha" +
-      "red\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_tim" +
-      "e\030\006 \001(\003\"\036\n\013SwitchState\022\017\n\007enabled\030\001 \001(\010B",
-      "E\n*org.apache.hadoop.hbase.protobuf.gene" +
-      "ratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
+      "AY\020\002\"w\n\005Table\022-\n\005state\030\001 \002(\0162\025.hbase.pb.",
+      "Table.State:\007ENABLED\"?\n\005State\022\013\n\007ENABLED" +
+      "\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABL" +
+      "ING\020\003\"D\n\007TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.h" +
+      "base.pb.TableName\022\020\n\010families\030\002 \003(\014\"\330\001\n\017" +
+      "ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027r" +
+      "eplicationEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003" +
+      "(\0132\030.hbase.pb.BytesBytesPair\022/\n\rconfigur" +
+      "ation\030\004 \003(\0132\030.hbase.pb.NameStringPair\022$\n" +
+      "\ttable_cfs\030\005 \003(\0132\021.hbase.pb.TableCF\022\021\n\tb" +
+      "andwidth\030\006 \001(\003\"g\n\020ReplicationState\022/\n\005st",
+      "ate\030\001 \002(\0162 .hbase.pb.ReplicationState.St" +
+      "ate\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"" +
+      "+\n\027ReplicationHLogPosition\022\020\n\010position\030\001" +
+      " \002(\003\"%\n\017ReplicationLock\022\022\n\nlock_owner\030\001 " +
+      "\002(\t\"\252\001\n\tTableLock\022\'\n\ntable_name\030\001 \001(\0132\023." +
+      "hbase.pb.TableName\022(\n\nlock_owner\030\002 \001(\0132\024" +
+      ".hbase.pb.ServerName\022\021\n\tthread_id\030\003 \001(\003\022" +
+      "\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013c" +
+      "reate_time\030\006 \001(\003\"\036\n\013SwitchState\022\017\n\007enabl" +
+      "ed\030\001 \001(\010BE\n*org.apache.hadoop.hbase.prot",
+      "obuf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11062,11 +11059,11 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SplitLogTask_descriptor,
               new java.lang.String[] { "State", "ServerName", "Mode", });
-          internal_static_hbase_pb_DeprecatedTableState_descriptor =
+          internal_static_hbase_pb_Table_descriptor =
             getDescriptor().getMessageTypes().get(5);
-          internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable = new
+          internal_static_hbase_pb_Table_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_DeprecatedTableState_descriptor,
+              internal_static_hbase_pb_Table_descriptor,
               new java.lang.String[] { "State", });
           internal_static_hbase_pb_TableCF_descriptor =
             getDescriptor().getMessageTypes().get(6);
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index 82eed3d8d832..d02712304faa 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -39,27 +39,6 @@ message TableSchema {
   repeated NameStringPair configuration = 4;
 }
 
-/** Denotes state of the table */
-message TableState {
-  // Table's current state
-  enum State {
-    ENABLED = 0;
-    DISABLED = 1;
-    DISABLING = 2;
-    ENABLING = 3;
-  }
-  // This is the table's state.
-  required State state = 1;
-  required TableName table = 2;
-  optional uint64 timestamp = 3;
-}
-
-/** On HDFS representation of table state. */
-message TableDescriptor {
-  required TableSchema schema = 1;
-  optional TableState.State state = 2 [ default = ENABLED ];
-}
-
 /**
  * Column Family Schema
  * Inspired by the rest ColumSchemaMessage
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 0730bff2e0ed..38ac599a8df2 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -457,14 +457,6 @@ message GetTableNamesResponse {
   repeated TableName table_names = 1;
 }
 
-message GetTableStateRequest {
-  required TableName table_name = 1;
-}
-
-message GetTableStateResponse {
-  required TableState table_state = 1;
-}
-
 message GetClusterStatusRequest {
 }
 
@@ -934,10 +926,6 @@ service MasterService {
 
   rpc GetLogEntries(LogRequest)
     returns(LogEntry);
-
-  /** returns table state */
-  rpc GetTableState(GetTableStateRequest)
-    returns(GetTableStateResponse);
 }
 
 /** Request and response to get the clusterID for this cluster */
@@ -974,6 +962,16 @@ message GetNumLiveRSResponse {
   required int32 num_region_servers = 1;
 }
 
+/** Request to check the state of a given table */
+message GetTableStateRequest {
+  required string table_name = 1;
+  required bool is_enabled = 2;
+}
+
+message GetTableStateResponse {
+  required bool enabled_or_disabled = 1;
+}
+
 /**
  * Implements all the RPCs needed by clients to look up cluster meta information needed for connection establishment.
  */
@@ -998,4 +996,9 @@ service ClientMetaService {
    * Get number of live region servers.
    */
   rpc GetNumLiveRS(GetNumLiveRSRequest) returns(GetNumLiveRSResponse);
+
+  /**
+   * Returns the state of the table.
+   */
+  rpc GetTableState(GetTableStateRequest) returns(GetTableStateResponse);
 }
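
The reverted messages turn GetTableState from a descriptive call (return a TableState message) into a verification call: the client names the state it expects (is_enabled) and the master answers with a single boolean. Below is a minimal caller sketch against the regenerated stubs; the class and method names are illustrative, and obtaining the ClientMetaService.BlockingInterface (registry/channel wiring) is assumed and elided.

import com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

final class GetTableStateExample {
  // Asks the master whether the table is in the expected online state.
  static boolean checkTableState(MasterProtos.ClientMetaService.BlockingInterface stub,
      String tableName, boolean expectEnabled) throws ServiceException {
    MasterProtos.GetTableStateRequest request =
        MasterProtos.GetTableStateRequest.newBuilder()
            .setTableName(tableName)      // now a plain string, not an hbase.pb.TableName message
            .setIsEnabled(expectEnabled)  // the state the caller wants verified
            .build();
    MasterProtos.GetTableStateResponse response = stub.getTableState(null, request);
    return response.getEnabledOrDisabled();
  }
}

Note that the RPC also moves from MasterService to ClientMetaService, so registry clients can check table state without a full master admin stub.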
diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
index ad740f3ccba4..1638bf707147 100644
--- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
@@ -105,9 +105,8 @@ message SplitLogTask {
 
 /**
  * The znode that holds state of table.
- * Deprected, table state is stored in table descriptor on HDFS.
  */
-message DeprecatedTableState {
+message Table {
   // Table's current state
   enum State {
     ENABLED = 0;
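
With the Table message restored in ZooKeeper.proto, per-table state once again round-trips through a znode. The following is a sketch of the decode side, approximately what the restored ZK-based readers (e.g. ZKTableStateClientSideReader) must do; it assumes the znode payload carries HBase's standard PBUF magic prefix followed by a serialized hbase.pb.Table.

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

final class TableZNodeDecoder {
  // Decodes the table state serialized into a table's znode.
  static ZooKeeperProtos.Table.State parseTableState(byte[] data)
      throws DeserializationException {
    try {
      ProtobufUtil.expectPBMagicPrefix(data);     // validates the leading "PBUF" marker
      int magicLen = ProtobufUtil.lengthOfPBMagic();
      ZooKeeperProtos.Table table = ZooKeeperProtos.Table.newBuilder()
          .mergeFrom(data, magicLen, data.length - magicLen)
          .build();
      return table.getState();                    // declared with [default = ENABLED]
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw new DeserializationException(e);
    }
  }
}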
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 49f2e3c7f4ae..2b12f8103534 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -45,7 +45,6 @@
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.LoadBalancer;
@@ -55,6 +54,7 @@
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 
 /**
  * Service to support Region Server Grouping (HBase-6721)
@@ -269,8 +269,8 @@ public void moveTables(Set<TableName> tables, String targetGroup) throws IOExcep
     }
     for(TableName table: tables) {
       if (master.getAssignmentManager().getTableStateManager().isTableState(table,
-          TableState.State.DISABLED,
-          TableState.State.DISABLING)) {
+          ZooKeeperProtos.Table.State.DISABLED,
+          ZooKeeperProtos.Table.State.DISABLING)) {
         LOG.debug("Skipping move regions because the table" + table + " is disabled.");
         continue;
       }
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 41a83a584f94..6799e69fb9f4 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -53,6 +53,7 @@
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -60,13 +61,11 @@
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerListener;
-import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.net.Address;
@@ -75,6 +74,7 @@
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -646,7 +646,7 @@ public boolean visit(Result row) throws IOException {
                     if (sn == null) {
                       found.set(false);
                     } else if (tsm.isTableState(RSGROUP_TABLE_NAME,
-                        TableState.State.ENABLED)) {
+                        ZooKeeperProtos.Table.State.ENABLED)) {
                       try {
                         ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
                         ClientProtos.GetRequest request =
@@ -670,7 +670,7 @@ public boolean visit(Result row) throws IOException {
                     if (sn == null) {
                       nsFound.set(false);
                     } else if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME,
-                        TableState.State.ENABLED)) {
+                        ZooKeeperProtos.Table.State.ENABLED)) {
                       try {
                         ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
                         ClientProtos.GetRequest request =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
index b4c808cb1062..bdb202d362c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
@@ -55,4 +55,12 @@ public interface CoordinatedStateManager {
    * @return instance of Server coordinated state manager runs within
    */
   Server getServer();
+
+  /**
+   * Returns the implementation of TableStateManager.
+   * @throws InterruptedException if the operation is interrupted
+   * @throws CoordinatedStateException if an error happens in the underlying coordination mechanism
+   */
+  TableStateManager getTableStateManager() throws InterruptedException,
+    CoordinatedStateException;
 }
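
The interface change above restores getTableStateManager() as the server-side entry
point for table state lookups. A minimal sketch of resolving it from a running Server,
using only what the interface declares; the helper class is illustrative:

    import org.apache.hadoop.hbase.CoordinatedStateException;
    import org.apache.hadoop.hbase.CoordinatedStateManager;
    import org.apache.hadoop.hbase.Server;
    import org.apache.hadoop.hbase.TableStateManager;

    // Illustrative lookup that handles both declared failure modes of the
    // restored interface method.
    final class TableStateLookup {
      static TableStateManager resolve(Server server) throws CoordinatedStateException {
        CoordinatedStateManager csm = server.getCoordinatedStateManager();
        try {
          return csm.getTableStateManager();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();      // preserve the interrupt status
          throw new CoordinatedStateException(e);  // surface as a coordination failure
        }
      }
    }
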
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
deleted file mode 100644
index c275f00c72a4..000000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import com.google.protobuf.InvalidProtocolBufferException;
-import java.io.IOException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-
-/**
- * Class represents table state on HDFS.
- */
-@InterfaceAudience.Private
-public class TableDescriptor {
-  private HTableDescriptor hTableDescriptor;
-  private TableState.State tableState;
-
-  /**
-   * Creates TableDescriptor with all fields.
-   * @param hTableDescriptor HTableDescriptor to use
-   * @param tableState table state
-   */
-  public TableDescriptor(HTableDescriptor hTableDescriptor,
-      TableState.State tableState) {
-    this.hTableDescriptor = hTableDescriptor;
-    this.tableState = tableState;
-  }
-
-  /**
-   * Creates TableDescriptor with Enabled table.
-   * @param hTableDescriptor HTableDescriptor to use
-   */
-  public TableDescriptor(HTableDescriptor hTableDescriptor) {
-    this(hTableDescriptor, TableState.State.ENABLED);
-  }
-
-  /**
-   * Associated HTableDescriptor
-   * @return instance of HTableDescriptor
-   */
-  public HTableDescriptor getHTableDescriptor() {
-    return hTableDescriptor;
-  }
-
-  public void setHTableDescriptor(HTableDescriptor hTableDescriptor) {
-    this.hTableDescriptor = hTableDescriptor;
-  }
-
-  public TableState.State getTableState() {
-    return tableState;
-  }
-
-  public void setTableState(TableState.State tableState) {
-    this.tableState = tableState;
-  }
-
-  /**
-   * Convert to PB.
-   */
-  public HBaseProtos.TableDescriptor convert() {
-    return HBaseProtos.TableDescriptor.newBuilder()
-        .setSchema(hTableDescriptor.convert())
-        .setState(tableState.convert())
-        .build();
-  }
-
-  /**
-   * Convert from PB
-   */
-  public static TableDescriptor convert(HBaseProtos.TableDescriptor proto) {
-    HTableDescriptor hTableDescriptor = HTableDescriptor.convert(proto.getSchema());
-    TableState.State state = TableState.State.convert(proto.getState());
-    return new TableDescriptor(hTableDescriptor, state);
-  }
-
-  /**
-   * @return This instance serialized with pb with pb magic prefix
-   * @see #parseFrom(byte[])
-   */
-  public byte [] toByteArray() {
-    return ProtobufUtil.prependPBMagic(convert().toByteArray());
-  }
-
-  /**
-   * @param bytes A pb serialized {@link TableDescriptor} instance with pb magic prefix
-   * @see #toByteArray()
-   */
-  public static TableDescriptor parseFrom(final byte [] bytes)
-      throws DeserializationException, IOException {
-    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
-      throw new DeserializationException("Expected PB encoded TableDescriptor");
-    }
-    int pblen = ProtobufUtil.lengthOfPBMagic();
-    HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder();
-    HBaseProtos.TableDescriptor ts;
-    try {
-      ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
-    } catch (InvalidProtocolBufferException e) {
-      throw new DeserializationException(e);
-    }
-    return convert(ts);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    TableDescriptor that = (TableDescriptor) o;
-
-    if (hTableDescriptor != null ?
-        !hTableDescriptor.equals(that.hTableDescriptor) :
-        that.hTableDescriptor != null){
-      return false;
-    }
-    if (tableState != that.tableState) {
-      return false;
-    }
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = hTableDescriptor != null ? hTableDescriptor.hashCode() : 0;
-    result = 31 * result + (tableState != null ? tableState.hashCode() : 0);
-    return result;
-  }
-
-  @Override
-  public String toString() {
-    return "TableDescriptor{" +
-        "hTableDescriptor=" + hTableDescriptor +
-        ", tableState=" + tableState +
-        '}';
-  }
-}
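
The deleted class above is the heart of the incompatible change being undone: it
persisted the table state next to the schema as a pb-magic-prefixed
HBaseProtos.TableDescriptor blob, a framing that pre-HBASE-7767 readers cannot parse.
A minimal sketch of that framing, using only the ProtobufUtil helpers visible in the
deleted code; 'payload' stands in for any serialized protobuf message:

    import org.apache.hadoop.hbase.exceptions.DeserializationException;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

    // Illustrative pb-magic framing: a short magic header distinguishes protobuf
    // content from older serialization formats.
    final class PbMagicFraming {
      static byte[] frame(byte[] payload) {
        return ProtobufUtil.prependPBMagic(payload);
      }

      static byte[] unframe(byte[] bytes) throws DeserializationException {
        if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
          throw new DeserializationException("Expected PB magic prefix");
        }
        int pblen = ProtobufUtil.lengthOfPBMagic();
        byte[] payload = new byte[bytes.length - pblen];
        System.arraycopy(bytes, pblen, payload, 0, payload.length);
        return payload;
      }
    }
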
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index c7bfd03e9595..33ae1d5aa933 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -36,14 +36,6 @@ public interface TableDescriptors {
   HTableDescriptor get(final TableName tableName)
   throws IOException;
 
-  /**
-   * @param tableName
-   * @return TableDescriptor for tablename
-   * @throws IOException
-   */
-  TableDescriptor getDescriptor(final TableName tableName)
-      throws IOException;
-
   /**
    * Get Map of all NamespaceDescriptors for a given namespace.
    * @return Map of all descriptors.
@@ -61,15 +53,6 @@ Map<String, HTableDescriptor> getByNamespace(String name)
  Map<String, HTableDescriptor> getAll()
   throws IOException;
 
-  /**
-   * Get Map of all TableDescriptors. Populates the descriptor cache as a
-   * side effect.
-   * @return Map of all descriptors.
-   * @throws IOException
-   */
-  Map<String, TableDescriptor> getAllDescriptors()
-      throws IOException;
-
   /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
@@ -78,14 +61,6 @@ Map<String, TableDescriptor> getAllDescriptors()
   void add(final HTableDescriptor htd)
   throws IOException;
 
-  /**
-   * Add or update descriptor
-   * @param htd Descriptor to set into TableDescriptors
-   * @throws IOException
-   */
-  void add(final TableDescriptor htd)
-      throws IOException;
-
   /**
    * @param tablename
    * @return Instance of table descriptor or null if none found.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
new file mode 100644
index 000000000000..21c09b8a853e
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
@@ -0,0 +1,121 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.InterruptedIOException;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+
+/**
+ * Helper class for table state management, used by operations running inside a
+ * RegionServer or the HMaster.
+ * Depending on the implementation, it fetches information from the HBase system table,
+ * a local data store, the ZooKeeper ensemble, or another backing store.
+ * Code running on the client side (with no coordinated state context) should instead use
+ * {@link org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader}
+ */
+@InterfaceAudience.Private
+public interface TableStateManager {
+
+  /**
+   * Sets the table into the desired state. Fails silently if the table is already in that state.
+   * @param tableName table to process
+   * @param state new state of this table
+   * @throws CoordinatedStateException if an error happened while trying to set the table state
+   */
+  void setTableState(TableName tableName, ZooKeeperProtos.Table.State state)
+    throws CoordinatedStateException;
+
+  /**
+   * Sets the specified table into newState, but only if the table is already in
+   * one of the given states (otherwise no operation is performed).
+   * @param tableName table to process
+   * @param newState new state for the table
+   * @param states the table must be in one of these states for the operation
+   *               to be performed
+   * @throws CoordinatedStateException if an error happened while performing the operation
+   * @return true if the operation succeeded, false otherwise
+   */
+  boolean setTableStateIfInStates(TableName tableName, ZooKeeperProtos.Table.State newState,
+                                  ZooKeeperProtos.Table.State... states)
+    throws CoordinatedStateException;
+
+  /**
+   * Sets the specified table into newState, but only if the table is NOT in
+   * one of the given states (otherwise no operation is performed).
+   * @param tableName table to process
+   * @param newState new state for the table
+   * @param states the table must NOT be in any of these states for the operation
+   *               to be performed
+   * @throws CoordinatedStateException if an error happened while performing the operation
+   * @return true if the operation succeeded, false otherwise
+   */
+  boolean setTableStateIfNotInStates(TableName tableName, ZooKeeperProtos.Table.State newState,
+                                     ZooKeeperProtos.Table.State... states)
+    throws CoordinatedStateException;
+
+  /**
+   * @return true if the table is in any one of the listed states, false otherwise.
+   */
+  boolean isTableState(TableName tableName, ZooKeeperProtos.Table.State... states);
+
+  /**
+   * Same as {@link #isTableState(TableName, ZooKeeperProtos.Table.State...)}, with
+   * checkSource letting the implementation consult its authoritative store rather than
+   * any cached state.
+   * @return true if the table is in any one of the listed states, false otherwise.
+   */
+  boolean isTableState(TableName tableName, boolean checkSource,
+      ZooKeeperProtos.Table.State... states);
+
+  /**
+   * Marks the table as deleted. Fails silently if the table is not currently marked as disabled.
+   * @param tableName table to be deleted
+   * @throws CoordinatedStateException if an error happened while performing the operation
+   */
+  void setDeletedTable(TableName tableName) throws CoordinatedStateException;
+
+  /**
+   * Checks if table is present.
+   *
+   * @param tableName table we're checking
+   * @return true if the table is present, false otherwise
+   */
+  boolean isTablePresent(TableName tableName);
+
+  /**
+   * @return set of tables which are in any one of the listed states, empty Set if none
+   */
+  Set<TableName> getTablesInStates(ZooKeeperProtos.Table.State... states)
+    throws InterruptedIOException, CoordinatedStateException;
+
+  /**
+   * If the table is found in the given state, the in-memory state is removed. This
+   * helps in cases where CreateTable is retried by the client after a
+   * failure. If deletePermanentState is true, the permanently kept flag is
+   * also reset.
+   *
+   * @param tableName table we're working on
+   * @param states if the table isn't in this state, the operation aborts
+   * @param deletePermanentState if true, reset the permanent flag
+   * @throws CoordinatedStateException if an error happened in the underlying coordination engine
+   */
+  void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states,
+                                boolean deletePermanentState)
+    throws CoordinatedStateException;
+}
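
The conditional setters above give callers compare-and-set style transitions. A minimal
sketch, assuming an already-resolved TableStateManager, of the kind of guarded
transition an enable-table flow performs; the helper is illustrative, not code from
this patch:

    import org.apache.hadoop.hbase.CoordinatedStateException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.TableStateManager;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

    // Illustrative guarded transition: move a table to ENABLING only if it is
    // currently DISABLED; returns false (and does nothing) otherwise.
    final class EnableTransition {
      static boolean beginEnable(TableStateManager tsm, TableName table)
          throws CoordinatedStateException {
        return tsm.setTableStateIfInStates(table,
            ZooKeeperProtos.Table.State.ENABLING,
            ZooKeeperProtos.Table.State.DISABLED);
      }
    }
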
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
index 03762aba5eba..f79e5d8dfbb5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hbase.coordination;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableStateManager;
 
 /**
  * Base class for {@link org.apache.hadoop.hbase.CoordinatedStateManager} implementations.
@@ -47,6 +49,9 @@ public Server getServer() {
     return null;
   }
 
+  @Override
+  public abstract TableStateManager getTableStateManager() throws InterruptedException,
+    CoordinatedStateException;
   /**
    * Method to retrieve coordination for split log worker
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
index 7222b0f52220..2f739befb4f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
@@ -20,9 +20,13 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableStateManager;
+import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
 
 /**
  * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}.
@@ -56,6 +60,16 @@ public Server getServer() {
     return server;
   }
 
+  @Override
+  public TableStateManager getTableStateManager() throws InterruptedException,
+      CoordinatedStateException {
+    try {
+      return new ZKTableStateManager(server.getZooKeeper());
+    } catch (KeeperException e) {
+      throw new CoordinatedStateException(e);
+    }
+  }
+
   @Override
   public SplitLogWorkerCoordination getSplitLogWorkerCoordination() {
     return splitLogWorkerCoordination;
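
The ZK implementation above instantiates a fresh ZKTableStateManager per call and maps
KeeperException onto CoordinatedStateException. A sketch of constructing the manager
directly, as standalone tooling or a test might; the watcher identifier and the null
Abortable are illustrative choices, not taken from this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableStateManager;
    import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
    import org.apache.zookeeper.KeeperException;

    // Illustrative standalone construction of the ZK-backed manager.
    final class StandaloneZkTableState {
      static TableStateManager open() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-state-probe", null);
        try {
          return new ZKTableStateManager(zkw);
        } catch (KeeperException e) {
          zkw.close();  // release the ZK session if construction fails
          throw e;
        }
      }
    }
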
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
index b54740a86ffe..812bbe25ccb8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
@@ -23,11 +23,11 @@
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -309,7 +309,7 @@ public boolean commitOpenOnMasterSide(AssignmentManager assignmentManager,
     }
     if (!openedNodeDeleted) {
       if (assignmentManager.getTableStateManager().isTableState(regionInfo.getTable(),
-          TableState.State.DISABLED, TableState.State.DISABLING)) {
+          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
         debugLog(regionInfo, "Opened region "
           + regionInfo.getShortNameToLog() + " but "
           + "this table is disabled, triggering close of region");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index f7cfc4cc0479..4d5a6b7c5e45 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -20,6 +20,7 @@
 
 import com.google.common.collect.LinkedHashMultimap;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -63,6 +64,7 @@
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Admin.MasterSwitchType;
@@ -75,7 +77,6 @@
 import org.apache.hadoop.hbase.coordination.ZkOpenRegionCoordination;
 import org.apache.hadoop.hbase.coordination.ZkRegionMergeCoordination;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -91,12 +92,12 @@
 import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.hadoop.hbase.util.Pair;
@@ -282,11 +283,14 @@ public enum ServerHostRegion {
    * @param service Executor service
    * @param metricsMaster metrics manager
    * @param tableLockManager TableLock manager
+   * @throws KeeperException
+   * @throws IOException
    */
   public AssignmentManager(MasterServices server, ServerManager serverManager,
       final LoadBalancer balancer,
       final ExecutorService service, MetricsMaster metricsMaster,
-      final TableLockManager tableLockManager, final TableStateManager tableStateManager) {
+      final TableLockManager tableLockManager) throws KeeperException,
+        IOException, CoordinatedStateException {
     super(server.getZooKeeper());
     this.server = server;
     this.serverManager = serverManager;
@@ -299,9 +303,15 @@ public AssignmentManager(MasterServices server, ServerManager serverManager,
     this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
            HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
            FavoredNodeLoadBalancer.class);
-
-    this.tableStateManager = tableStateManager;
-
+    try {
+      if (server.getCoordinatedStateManager() != null) {
+        this.tableStateManager = server.getCoordinatedStateManager().getTableStateManager();
+      } else {
+        this.tableStateManager = null;
+      }
+    } catch (InterruptedException e) {
+      throw new InterruptedIOException();
+    }
     // This is the max attempts, not retries, so it should be at least 1.
     this.maximumAttempts = Math.max(1,
       this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
@@ -379,7 +389,7 @@ public boolean unregisterListener(final AssignmentListener listener) {
   }
 
   /**
-   * @return Instance of TableStateManager.
+   * @return Instance of ZKTableStateManager.
    */
   public TableStateManager getTableStateManager() {
     // These are 'expensive' to make involving trip to zk ensemble so allow
@@ -502,9 +512,10 @@ void failoverCleanupDone() {
    * @throws IOException
    * @throws KeeperException
    * @throws InterruptedException
+   * @throws CoordinatedStateException
    */
   void joinCluster() throws IOException,
-      KeeperException, CoordinatedStateException {
+      KeeperException, InterruptedException, CoordinatedStateException {
     long startTime = System.currentTimeMillis();
     // Concurrency note: In the below the accesses on regionsInTransition are
     // outside of a synchronization block where usually all accesses to RIT are
@@ -545,7 +556,7 @@ void joinCluster() throws IOException,
    * @throws InterruptedException
    */
  boolean processDeadServersAndRegionsInTransition(final Set<ServerName> deadServers)
-      throws KeeperException, IOException {
+  throws KeeperException, IOException, InterruptedException, CoordinatedStateException {
    List<String> nodes = ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode);
 
     if (useZKForAssignment && nodes == null) {
@@ -553,6 +564,7 @@ boolean processDeadServersAndRegionsInTransition(final Set<ServerName> deadServe
       server.abort(errorMessage, new IOException(errorMessage));
       return true; // Doesn't matter in this case
     }
+
     boolean failover = !serverManager.getDeadServers().isEmpty();
     if (failover) {
       // This may not be a failover actually, especially if meta is on this master.
@@ -673,11 +685,7 @@ boolean processDeadServersAndRegionsInTransition(final Set<ServerName> deadServe
     if (!failover) {
       // Fresh cluster startup.
       LOG.info("Clean cluster startup. Assigning user regions");
-      try {
-        assignAllUserRegions(allRegions);
-      } catch (InterruptedException ie) {
-        ExceptionUtil.rethrowIfInterrupt(ie);
-      }
+      assignAllUserRegions(allRegions);
     }
     // unassign replicas of the split parents and the merged regions
     // the daughter replicas are opened in assignAllUserRegions if it was
@@ -695,10 +703,11 @@ boolean processDeadServersAndRegionsInTransition(final Set<ServerName> deadServe
    * locations are returned.
    */
  private Map<HRegionInfo, ServerName> getUserRegionsToAssign()
-      throws IOException {
+      throws InterruptedIOException, CoordinatedStateException {
    Set<TableName> disabledOrDisablingOrEnabling =
-        tableStateManager.getTablesInStates(TableState.State.DISABLED,
-          TableState.State.DISABLING, TableState.State.ENABLING);
+        tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLED,
+          ZooKeeperProtos.Table.State.DISABLING, ZooKeeperProtos.Table.State.ENABLING);
+
     // Clean re/start, mark all user regions closed before reassignment
     return regionStates.closeAllUserRegions(disabledOrDisablingOrEnabling);
   }
@@ -726,7 +735,7 @@ public void run() {
         try {
           // Assign the regions
           assignAllUserRegions(getUserRegionsToAssign());
-        } catch (IOException | InterruptedException e) {
+        } catch (CoordinatedStateException | IOException | InterruptedException e) {
          LOG.error("Exception occurred while assigning user regions.", e);
         }
       };
@@ -1469,7 +1478,7 @@ public void run() {
             LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);
 
             boolean disabled = getTableStateManager().isTableState(regionInfo.getTable(),
-                TableState.State.DISABLED, TableState.State.DISABLING);
+                ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING);
 
             ServerName serverName = rs.getServerName();
             if (serverManager.isServerOnline(serverName)) {
@@ -2268,7 +2277,7 @@ public void assign(RegionState state,
             // will not be in ENABLING or ENABLED state.
             TableName tableName = region.getTable();
             if (!tableStateManager.isTableState(tableName,
-              TableState.State.ENABLED, TableState.State.ENABLING)) {
+              ZooKeeperProtos.Table.State.ENABLED, ZooKeeperProtos.Table.State.ENABLING)) {
               LOG.debug("Setting table " + tableName + " to ENABLED state.");
               setEnabledTable(tableName);
             }
@@ -2494,8 +2503,8 @@ private void processAlreadyOpenedRegion(HRegionInfo region, ServerName sn) {
 
   private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
     if (this.tableStateManager.isTableState(region.getTable(),
-            TableState.State.DISABLED,
-            TableState.State.DISABLING) || replicasToClose.contains(region)) {
+        ZooKeeperProtos.Table.State.DISABLED,
+        ZooKeeperProtos.Table.State.DISABLING) || replicasToClose.contains(region)) {
       LOG.info("Table " + region.getTable() + " is disabled or disabling;"
         + " skipping assign of " + region.getRegionNameAsString());
       offlineDisabledRegion(region);
@@ -3126,7 +3135,7 @@ private void assignAllUserRegions(Map<HRegionInfo, ServerName> allRegions)
     for (HRegionInfo hri : regionsFromMetaScan) {
       TableName tableName = hri.getTable();
       if (!tableStateManager.isTableState(tableName,
-              TableState.State.ENABLED)) {
+          ZooKeeperProtos.Table.State.ENABLED)) {
         setEnabledTable(tableName);
       }
     }
@@ -3193,14 +3202,14 @@ boolean waitUntilNoRegionsInTransition(final long timeout)
    * @throws IOException
    */
  Set<ServerName> rebuildUserRegions() throws
-          IOException, KeeperException {
+      IOException, KeeperException, CoordinatedStateException {
    Set<TableName> disabledOrEnablingTables = tableStateManager.getTablesInStates(
-            TableState.State.DISABLED, TableState.State.ENABLING);
+      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.ENABLING);
 
    Set<TableName> disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates(
-            TableState.State.DISABLED,
-            TableState.State.DISABLING,
-            TableState.State.ENABLING);
+      ZooKeeperProtos.Table.State.DISABLED,
+      ZooKeeperProtos.Table.State.DISABLING,
+      ZooKeeperProtos.Table.State.ENABLING);
 
     // Region assignment from META
    List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getConnection());
@@ -3252,7 +3261,7 @@ Set<ServerName> rebuildUserRegions() throws
         ServerName lastHost = hrl.getServerName();
         ServerName regionLocation = RegionStateStore.getRegionServer(result, replicaId);
         if (tableStateManager.isTableState(regionInfo.getTable(),
-             TableState.State.DISABLED)) {
+             ZooKeeperProtos.Table.State.DISABLED)) {
           // force region to forget it hosts for disabled/disabling tables.
           // see HBASE-13326
           lastHost = null;
@@ -3282,7 +3291,7 @@ Set<ServerName> rebuildUserRegions() throws
         // this will be used in rolling restarts
         if (!disabledOrDisablingOrEnabling.contains(tableName)
           && !getTableStateManager().isTableState(tableName,
-                TableState.State.ENABLED)) {
+            ZooKeeperProtos.Table.State.ENABLED)) {
           setEnabledTable(tableName);
         }
       }
@@ -3299,9 +3308,9 @@ Set<ServerName> rebuildUserRegions() throws
    * @throws IOException
    */
   private void recoverTableInDisablingState()
-          throws KeeperException, IOException {
+      throws KeeperException, IOException, CoordinatedStateException {
    Set<TableName> disablingTables =
-            tableStateManager.getTablesInStates(TableState.State.DISABLING);
+      tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLING);
     if (disablingTables.size() != 0) {
       for (TableName tableName : disablingTables) {
         // Recover by calling DisableTableHandler
@@ -3323,9 +3332,9 @@ private void recoverTableInDisablingState()
    * @throws IOException
    */
   private void recoverTableInEnablingState()
-          throws KeeperException, IOException {
+      throws KeeperException, IOException, CoordinatedStateException {
    Set<TableName> enablingTables = tableStateManager.
-            getTablesInStates(TableState.State.ENABLING);
+      getTablesInStates(ZooKeeperProtos.Table.State.ENABLING);
     if (enablingTables.size() != 0) {
       for (TableName tableName : enablingTables) {
         // Recover by calling EnableTableHandler
@@ -3397,9 +3406,9 @@ void processRegionInTransitionZkLess() {
         LOG.info("Server " + serverName + " isn't online. SSH will handle this");
         continue;
       }
-      RegionState.State state = regionState.getState();
       HRegionInfo regionInfo = regionState.getRegion();
-      LOG.info("Processing " + regionState);
+      State state = regionState.getState();
+
       switch (state) {
       case CLOSED:
         invokeAssign(regionInfo);
@@ -3803,7 +3812,7 @@ public List<HRegionInfo> cleanOutCrashedServerReferences(final ServerName sn) {
             server.abort("Unexpected ZK exception deleting node " + hri, ke);
           }
           if (tableStateManager.isTableState(hri.getTable(),
-                  TableState.State.DISABLED, TableState.State.DISABLING)) {
+              ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
             regionStates.regionOffline(hri);
             it.remove();
             continue;
@@ -3826,7 +3835,7 @@ public void balance(final RegionPlan plan) {
     HRegionInfo hri = plan.getRegionInfo();
     TableName tableName = hri.getTable();
     if (tableStateManager.isTableState(tableName,
-            TableState.State.DISABLED, TableState.State.DISABLING)) {
+      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
       LOG.info("Ignored moving region of disabling/disabled table "
         + tableName);
       return;
@@ -3874,8 +3883,8 @@ public void shutdown() {
   protected void setEnabledTable(TableName tableName) {
     try {
       this.tableStateManager.setTableState(tableName,
-              TableState.State.ENABLED);
-    } catch (IOException e) {
+        ZooKeeperProtos.Table.State.ENABLED);
+    } catch (CoordinatedStateException e) {
       // here we can abort as it is the start up flow
       String errorMsg = "Unable to ensure that the table " + tableName
           + " will be" + " enabled because of a ZooKeeper issue";
@@ -3980,8 +3989,8 @@ private void onRegionFailedOpen(
         // When there are more than one region server a new RS is selected as the
         // destination and the same is updated in the region plan. (HBASE-5546)
         if (getTableStateManager().isTableState(hri.getTable(),
-                TableState.State.DISABLED, TableState.State.DISABLING) ||
-                replicasToClose.contains(hri)) {
+            ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
+            replicasToClose.contains(hri)) {
           offlineDisabledRegion(hri);
           return;
         }
@@ -4009,14 +4018,15 @@ private void onRegionOpen(final HRegionInfo hri, final ServerName sn, long openS
     // reset the count, if any
     failedOpenTracker.remove(hri.getEncodedName());
     if (getTableStateManager().isTableState(hri.getTable(),
-        TableState.State.DISABLED, TableState.State.DISABLING)) {
+        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
       invokeUnAssign(hri);
     }
   }
 
   private void onRegionClosed(final HRegionInfo hri) {
-    if (getTableStateManager().isTableState(hri.getTable(), TableState.State.DISABLED,
-        TableState.State.DISABLING) || replicasToClose.contains(hri)) {
+    if (getTableStateManager().isTableState(hri.getTable(),
+        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
+        replicasToClose.contains(hri)) {
       offlineDisabledRegion(hri);
       return;
     }
@@ -4062,7 +4072,7 @@ private String onRegionSplitReverted(ServerName sn,
     }
 
     if (getTableStateManager().isTableState(p.getTable(),
-        TableState.State.DISABLED, TableState.State.DISABLING)) {
+        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
       invokeUnAssign(p);
     }
     return null;
@@ -4088,7 +4098,7 @@ private String onRegionSplit(ServerName sn, TransitionCode code,
 
       // User could disable the table before master knows the new region.
       if (getTableStateManager().isTableState(p.getTable(),
-          TableState.State.DISABLED, TableState.State.DISABLING)) {
+          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
         invokeUnAssign(a);
         invokeUnAssign(b);
       } else {
@@ -4142,7 +4152,7 @@ private String onRegionMerge(ServerName sn, TransitionCode code,
 
       // User could disable the table before master knows the new region.
       if (getTableStateManager().isTableState(p.getTable(),
-          TableState.State.DISABLED, TableState.State.DISABLING)) {
+          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
         invokeUnAssign(p);
       } else {
        Callable<Object> mergeReplicasCallable = new Callable<Object>() {
@@ -4182,7 +4192,7 @@ private String onRegionMergeReverted(ServerName sn, TransitionCode code,
     }
 
     if (getTableStateManager().isTableState(p.getTable(),
-        TableState.State.DISABLED, TableState.State.DISABLING)) {
+        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
       invokeUnAssign(a);
       invokeUnAssign(b);
     }
@@ -4303,7 +4313,7 @@ private boolean handleRegionMerging(final RegionTransition rt, final String enco
 
       // User could disable the table before master knows the new region.
       if (tableStateManager.isTableState(p.getTable(),
-          TableState.State.DISABLED, TableState.State.DISABLING)) {
+          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
         unassign(p);
       }
     }
@@ -4433,7 +4443,7 @@ private boolean handleRegionSplitting(final RegionTransition rt, final String en
 
       // User could disable the table before master knows the new region.
       if (tableStateManager.isTableState(p.getTable(),
-          TableState.State.DISABLED, TableState.State.DISABLING)) {
+          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
         unassign(hri_a);
         unassign(hri_b);
       }
@@ -4711,7 +4721,7 @@ protected String onRegionTransition(final ServerName serverName,
         errorMsg = hri.getShortNameToLog()
           + " is not pending close on " + serverName;
       } else {
-          onRegionClosed(hri);
+        onRegionClosed(hri);
       }
       break;
 
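
Most of the AssignmentManager churn above replays a single idiom: fetch the set of
tables whose regions must not receive fresh assignments, then filter. A compact sketch
of that step, assuming an already-resolved TableStateManager; the class name is
illustrative:

    import java.io.InterruptedIOException;
    import java.util.Set;

    import org.apache.hadoop.hbase.CoordinatedStateException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.TableStateManager;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

    // Illustrative filter: tables that are disabled, disabling, or still enabling
    // must not get regions assigned during startup.
    final class AssignFilter {
      static Set<TableName> tablesToSkip(TableStateManager tsm)
          throws InterruptedIOException, CoordinatedStateException {
        return tsm.getTablesInStates(
            ZooKeeperProtos.Table.State.DISABLED,
            ZooKeeperProtos.Table.State.DISABLING,
            ZooKeeperProtos.Table.State.ENABLING);
      }
    }
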
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index edee462a5e30..e7c2f1ac64d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -86,7 +86,6 @@
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.http.InfoServer;
@@ -142,6 +141,7 @@
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
@@ -169,7 +169,6 @@
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
-import org.apache.hadoop.hbase.util.ZKDataMigrator;
 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
@@ -382,9 +381,6 @@ public void run() {
   private long splitPlanCount;
   private long mergePlanCount;
 
-  // handle table states
-  private TableStateManager tableStateManager;
-
   /** flag used in test cases in order to simulate RS failures during master initialization */
   private volatile boolean initializationBeforeMetaAssignment = false;
 
@@ -694,8 +690,9 @@ void initializeZKBasedSystemTrackers() throws IOException,
 
     this.assignmentManager = new AssignmentManager(this, serverManager,
       this.balancer, this.service, this.metricsMaster,
-      this.tableLockManager, tableStateManager);
+      this.tableLockManager);
     zooKeeper.registerListenerFirst(assignmentManager);
+
     this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
         this.serverManager);
     this.regionServerTracker.start();
@@ -727,14 +724,6 @@ void initializeZKBasedSystemTrackers() throws IOException,
     this.mpmHost.register(new MasterFlushTableProcedureManager());
     this.mpmHost.loadProcedures(conf);
     this.mpmHost.initialize(this, this.metricsMaster);
-
-    // migrating existent table state from zk
-    for (Map.Entry<TableName, TableState.State> entry : ZKDataMigrator
-        .queryForTableStates(getZooKeeper()).entrySet()) {
-      LOG.info("Converting state from zk to new states:" + entry);
-      tableStateManager.setTableState(entry.getKey(), entry.getValue());
-    }
-    ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode);
   }
 
   /**
@@ -799,9 +788,6 @@ private void finishActiveMasterInitialization(MonitoredTask status)
     // Invalidate all write locks held previously
     this.tableLockManager.reapWriteLocks();
 
-    this.tableStateManager = new TableStateManager(this);
-    this.tableStateManager.start();
-
     status.setStatus("Initializing ZK system trackers");
     initializeZKBasedSystemTrackers();
 
@@ -1199,8 +1185,8 @@ private void enableCrashedServerProcessing(final boolean waitForMeta)
   }
 
   private void enableMeta(TableName metaTableName) {
-    if (!this.tableStateManager.isTableState(metaTableName,
-            TableState.State.ENABLED)) {
+    if (!this.assignmentManager.getTableStateManager().isTableState(metaTableName,
+        ZooKeeperProtos.Table.State.ENABLED)) {
       this.assignmentManager.setEnabledTable(metaTableName);
     }
   }
@@ -1244,11 +1230,6 @@ public TableNamespaceManager getTableNamespaceManager() {
     return tableNamespaceManager;
   }
 
-  @Override
-  public TableStateManager getTableStateManager() {
-    return tableStateManager;
-  }
-
   /*
    * Start up all services. If any of these threads gets an unhandled exception
    * then they just die with a logged message.  This should be fine because
@@ -1692,8 +1673,9 @@ public boolean normalizeRegions() throws IOException, CoordinatedStateException
     }
 
     try {
-      final List<TableName> allEnabledTables = new ArrayList<>(this.assignmentManager
-          .getTableStateManager().getTablesInStates(TableState.State.ENABLED));
+      final List<TableName> allEnabledTables = new ArrayList<>(
+        this.assignmentManager.getTableStateManager().getTablesInStates(
+          ZooKeeperProtos.Table.State.ENABLED));
 
       Collections.shuffle(allEnabledTables);
 
@@ -2542,7 +2524,7 @@ public void checkTableModifiable(final TableName tableName)
       throw new TableNotFoundException(tableName);
     }
     if (!getAssignmentManager().getTableStateManager().
-        isTableState(tableName, TableState.State.DISABLED)) {
+        isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
       throw new TableNotDisabledException(tableName);
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 04392da6fa39..edb2ead44548 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -544,6 +544,7 @@ private Path checkRootDir(final Path rd, final Configuration c,
       fsd.createTableDescriptor(
           new HTableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
     }
+
     return rd;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index e87f664b5079..30d6c577d64c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -42,12 +42,11 @@
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -126,6 +125,8 @@
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
@@ -213,6 +214,7 @@
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessChecker;
@@ -1008,11 +1010,13 @@ public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
   public GetTableNamesResponse getTableNames(RpcController controller,
       GetTableNamesRequest req) throws ServiceException {
     try {
-      master.checkServiceStarted();
+      master.checkInitialized();
+
       final String regex = req.hasRegex() ? req.getRegex() : null;
       final String namespace = req.hasNamespace() ? req.getNamespace() : null;
      List<TableName> tableNames = master.listTableNames(namespace, regex,
           req.getIncludeSysTables());
+
       GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
       if (tableNames != null && tableNames.size() > 0) {
         // Add the table names to the response
@@ -1026,26 +1030,6 @@ public GetTableNamesResponse getTableNames(RpcController controller,
     }
   }
 
-  @Override
-  public MasterProtos.GetTableStateResponse getTableState(RpcController controller,
-      MasterProtos.GetTableStateRequest request) throws ServiceException {
-    try {
-      master.checkServiceStarted();
-      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
-      TableState.State state = master.getTableStateManager()
-              .getTableState(tableName);
-      if (state == null) {
-        throw new TableNotFoundException(tableName);
-      }
-      MasterProtos.GetTableStateResponse.Builder builder =
-              MasterProtos.GetTableStateResponse.newBuilder();
-      builder.setTableState(new TableState(tableName, state).convert());
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
   @Override
   public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
       IsCatalogJanitorEnabledRequest req) throws ServiceException {
@@ -1896,4 +1880,14 @@ public GetNumLiveRSResponse getNumLiveRS(RpcController rpcController, GetNumLive
     }
     return response.build();
   }
+
+  @Override
+  public GetTableStateResponse getTableState(RpcController rpcController,
+    GetTableStateRequest request) throws ServiceException {
+    final TableStateManager tsm = master.getAssignmentManager().getTableStateManager();
+    final TableName table = TableName.valueOf(request.getTableName());
+    final State stateToCheck = request.getIsEnabled() ? State.ENABLED : State.DISABLED;
+    GetTableStateResponse.Builder resp = GetTableStateResponse.newBuilder();
+    return resp.setEnabledOrDisabled(tsm.isTableState(table, stateToCheck)).build();
+  }
 }
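
The reinstated handler above shows the reverted wire contract: rather than returning a
serialized TableState object, the RPC answers a yes/no question, which is what keeps
pre-HBASE-7767 clients compatible. A hedged sketch of the client side of the exchange;
it assumes, per the handler above, that the proto carries the table name as a string
plus an is_enabled flag and returns a single enabled_or_disabled boolean:

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse;

    // Illustrative request/response handling for the restored getTableState RPC.
    final class TableStateRpcSketch {
      static GetTableStateRequest isEnabledRequest(String table) {
        return GetTableStateRequest.newBuilder()
            .setTableName(table)   // assumption: the proto field is a string
            .setIsEnabled(true)    // true => check ENABLED, false => check DISABLED
            .build();
      }

      static boolean interpret(GetTableStateResponse resp) {
        return resp.getEnabledOrDisabled();
      }
    }
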
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index d20b76468203..be6fb12d1c3c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -83,11 +83,6 @@ public interface MasterServices extends Server {
    */
   TableLockManager getTableLockManager();
 
-  /**
-   * @return Master's instance of {@link TableStateManager}
-   */
-  TableStateManager getTableStateManager();
-
   /**
    * @return Master's instance of {@link MasterCoprocessorHost}
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 4fc4c58ceea7..a444833607cc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -31,6 +31,7 @@
 import java.util.SortedSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,13 +43,14 @@
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -58,6 +60,9 @@
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
 /**
  * Region state accountant. It holds the states of all regions in the memory.
  * In normal scenario, it should match the meta table and the true region states.
@@ -715,7 +720,7 @@ public void regionOffline(
       if (oldServerName != null && serverHoldings.containsKey(oldServerName)) {
         if (force || (newState == State.MERGED || newState == State.SPLIT
             || hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(),
-            TableState.State.DISABLED, TableState.State.DISABLING))) {
+              ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING))) {
           // Offline the region only if it's merged/split, or the table is disabled/disabling.
           // Otherwise, offline it from this server only when it is online on a different server.
           LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
@@ -1288,8 +1293,8 @@ static boolean isOneOfStates(RegionState regionState, State... states) {
    * Update a region state. It will be put in transition if not already there.
    */
   private RegionState updateRegionState(final HRegionInfo hri,
-      final RegionState.State state, final ServerName serverName, long openSeqNum) {
-    if (state == RegionState.State.FAILED_CLOSE || state == RegionState.State.FAILED_OPEN) {
+      final State state, final ServerName serverName, long openSeqNum) {
+    if (state == State.FAILED_CLOSE || state == State.FAILED_OPEN) {
       LOG.warn("Failed to open/close " + hri.getShortNameToLog()
         + " on " + serverName + ", set to " + state);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index e576934bfb23..5929f26337a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -41,7 +41,6 @@
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -229,7 +228,7 @@ public synchronized boolean isTableAvailableAndInitialized(
     }
 
     // Now check if the table is assigned, if not then fail fast
-    if (isTableAssigned() && isTableEnabled()) {
+    if (isTableAssigned()) {
       try {
         boolean initGoodSofar = true;
         nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
@@ -298,12 +297,6 @@ public synchronized boolean isTableAvailableAndInitialized(
     return false;
   }
 
-  private boolean isTableEnabled() throws IOException {
-    return masterServices.getTableStateManager().getTableState(
-            TableName.NAMESPACE_TABLE_NAME
-    ).equals(TableState.State.ENABLED);
-  }
-
   private boolean isTableAssigned() {
     return !masterServices.getAssignmentManager().getRegionStates().
         getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME).isEmpty();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
deleted file mode 100644
index 4ba3d1086d7f..000000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.Set;
-
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableState;
-
-/**
- * This is a helper class used to manage table states.
- * States persisted in tableinfo and cached internally.
- */
-@InterfaceAudience.Private
-public class TableStateManager {
-  private static final Log LOG = LogFactory.getLog(TableStateManager.class);
-  private final TableDescriptors descriptors;
-
-  private final Map<TableName, TableState.State> tableStates = Maps.newConcurrentMap();
-
-  public TableStateManager(MasterServices master) {
-    this.descriptors = master.getTableDescriptors();
-  }
-
-  public void start() throws IOException {
-    Map<String, TableDescriptor> all = descriptors.getAllDescriptors();
-    for (TableDescriptor table : all.values()) {
-      TableName tableName = table.getHTableDescriptor().getTableName();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding table state: " + tableName
-            + ": " + table.getTableState());
-      }
-      tableStates.put(tableName, table.getTableState());
-    }
-  }
-
-  /**
-   * Set table state to provided.
-   * Caller should lock table on write.
-   * @param tableName table to change state for
-   * @param newState new state
-   * @throws IOException
-   */
-  public void setTableState(TableName tableName, TableState.State newState) throws IOException {
-    synchronized (tableStates) {
-      TableDescriptor descriptor = readDescriptor(tableName);
-      if (descriptor == null) {
-        throw new TableNotFoundException(tableName);
-      }
-      if (descriptor.getTableState() != newState) {
-        writeDescriptor(
-            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
-      }
-    }
-  }
-
-  /**
-   * Set the table state to the provided value, but only if the table is currently in one of the specified states.
-   * Caller should lock table on write.
-   * @param tableName table to change state for
-   * @param newState new state
-   * @param states states to check against
-   * @throws IOException
-   */
-  public boolean setTableStateIfInStates(TableName tableName,
-                                         TableState.State newState,
-                                         TableState.State... states)
-          throws IOException {
-    synchronized (tableStates) {
-      TableDescriptor descriptor = readDescriptor(tableName);
-      if (descriptor == null) {
-        throw new TableNotFoundException(tableName);
-      }
-      if (TableState.isInStates(descriptor.getTableState(), states)) {
-        writeDescriptor(
-            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
-        return true;
-      } else {
-        return false;
-      }
-    }
-  }
-
-
-  /**
-   * Set the table state to the provided value, but only if the table is not in any of the specified states.
-   * Caller should lock table on write.
-   * @param tableName table to change state for
-   * @param newState new state
-   * @param states states to check against
-   * @throws IOException
-   */
-  public boolean setTableStateIfNotInStates(TableName tableName,
-                                            TableState.State newState,
-                                            TableState.State... states)
-          throws IOException {
-    synchronized (tableStates) {
-      TableDescriptor descriptor = readDescriptor(tableName);
-      if (descriptor == null) {
-        throw new TableNotFoundException(tableName);
-      }
-      if (!TableState.isInStates(descriptor.getTableState(), states)) {
-        writeDescriptor(
-            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
-        return true;
-      } else {
-        return false;
-      }
-    }
-  }
-
-  public boolean isTableState(TableName tableName, TableState.State... states) {
-    TableState.State tableState = null;
-    try {
-      tableState = getTableState(tableName);
-    } catch (IOException e) {
-      LOG.error("Unable to get table state, probably table not exists");
-      return false;
-    }
-    return tableState != null && TableState.isInStates(tableState, states);
-  }
-
-  public void setDeletedTable(TableName tableName) throws IOException {
-    TableState.State remove = tableStates.remove(tableName);
-    if (remove == null) {
-      LOG.warn("Moving table " + tableName + " state to deleted but was " +
-              "already deleted");
-    }
-  }
-
-  public boolean isTablePresent(TableName tableName) throws IOException {
-    return getTableState(tableName) != null;
-  }
-
-  /**
-   * Return all tables in given states.
-   *
-   * @param states filter by states
-   * @return tables in given states
-   * @throws IOException
-   */
-  public Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
-    Set<TableName> rv = Sets.newHashSet();
-    for (Map.Entry<TableName, TableState.State> entry : tableStates.entrySet()) {
-      if (TableState.isInStates(entry.getValue(), states)) {
-        rv.add(entry.getKey());
-      }
-    }
-    return rv;
-  }
-
-  public TableState.State getTableState(TableName tableName) throws IOException {
-    TableState.State tableState = tableStates.get(tableName);
-    if (tableState == null) {
-      TableDescriptor descriptor = readDescriptor(tableName);
-      if (descriptor != null) {
-        tableState = descriptor.getTableState();
-      }
-    }
-    return tableState;
-  }
-
-  /**
-   * Write descriptor in place, update cache of states.
-   * Write lock should be held by the caller.
-   *
-   * @param descriptor what to write
-   */
-  private void writeDescriptor(TableDescriptor descriptor) throws IOException {
-    TableName tableName = descriptor.getHTableDescriptor().getTableName();
-    TableState.State state = descriptor.getTableState();
-    descriptors.add(descriptor);
-    LOG.debug("Table " + tableName + " written descriptor for state " + state);
-    tableStates.put(tableName, state);
-    LOG.debug("Table " + tableName + " updated state to " + state);
-  }
-
-  /**
-   * Read the current descriptor for a table and update the cache of states.
-   *
-   * @param tableName table to read the descriptor for
-   * @return descriptor
-   * @throws IOException
-   */
-  private TableDescriptor readDescriptor(TableName tableName) throws IOException {
-    TableDescriptor descriptor = descriptors.getDescriptor(tableName);
-    if (descriptor == null) {
-      tableStates.remove(tableName);
-    } else {
-      tableStates.put(tableName, descriptor.getTableState());
-    }
-    return descriptor;
-  }
-}
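The class deleted above is, in structure, a read-through cache over the descriptor files: getTableState() consults the in-memory map and falls back to readDescriptor() on a miss, and writeDescriptor() persists first and then re-primes the map. The revert replaces this with the ZK-backed ZKTableStateManager, moving the authoritative copy of each state from the tableinfo file back into a znode. A minimal, self-contained sketch of the pattern being removed (hypothetical names, Java 8+ for brevity; not HBase API):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    // Read-through cache: the map is a fast front for a slower backing store.
    final class ReadThroughStateCache<K, V> {
      private final Map<K, V> cache = new ConcurrentHashMap<>();
      private final Function<K, V> backingStore;  // e.g. "read tableinfo file"

      ReadThroughStateCache(Function<K, V> backingStore) {
        this.backingStore = backingStore;
      }

      // Mirrors getTableState(): cache hit, else read the store and re-prime.
      V get(K key) {
        V value = cache.get(key);
        if (value == null) {
          value = backingStore.apply(key);
          if (value != null) {
            cache.put(key, value);
          }
        }
        return value;
      }

      // Mirrors setDeletedTable(): drop the cached entry on table deletion.
      V remove(K key) {
        return cache.remove(key);
      }
    }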
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
index 3be3316f26d1..389a738c7a29 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
@@ -23,10 +23,11 @@
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 
 /**
  * Handles CLOSED region event on Master.
@@ -92,7 +93,7 @@ public void process() {
     LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName());
     // Check if this table is being disabled or not
     if (this.assignmentManager.getTableStateManager().isTableState(this.regionInfo.getTable(),
-        TableState.State.DISABLED, TableState.State.DISABLING) ||
+        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
         assignmentManager.getReplicasToClose().contains(regionInfo)) {
       assignmentManager.offlineDisabledRegion(regionInfo);
       return;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index 09569b30dfd9..79e24938afa3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -30,16 +30,14 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -50,6 +48,7 @@
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -120,6 +119,13 @@ public CreateTableHandler prepare()
       if (MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
         throw new TableExistsException(tableName);
       }
+
+      // During master initialization, the ZK state could be inconsistent due to failed DDL
+      // in the past. If we failed here, it would prevent the master from starting. We should
+      // force-set the system table state regardless of the current table state.
+      boolean skipTableStateCheck =
+          !((HMaster) this.server).isInitialized() && tableName.isSystemTable();
+      checkAndSetEnablingTable(assignmentManager, tableName, skipTableStateCheck);
       success = true;
     } finally {
       if (!success) {
@@ -129,6 +135,52 @@ public CreateTableHandler prepare()
     return this;
   }
 
+  static void checkAndSetEnablingTable(final AssignmentManager assignmentManager,
+      final TableName tableName, boolean skipTableStateCheck) throws IOException {
+    // If we have multiple client threads trying to create the table at the
+    // same time, given the async nature of the operation, the table
+    // could be in a state where hbase:meta table hasn't been updated yet in
+    // the process() function.
+    // Use enabling state to tell if there is already a request for the same
+    // table in progress. This will introduce a new zookeeper call. Given
+    // createTable isn't a frequent operation, that should be ok.
+    // TODO: now that we have table locks, re-evaluate above -- table locks are not enough.
+    // We could have cleared the hbase.rootdir and not zk.  How can we detect this case?
+    // Having to clean zk AND hdfs is awkward.
+    try {
+      if (skipTableStateCheck) {
+        assignmentManager.getTableStateManager().setTableState(
+          tableName,
+          ZooKeeperProtos.Table.State.ENABLING);
+      } else if (!assignmentManager.getTableStateManager().setTableStateIfNotInStates(
+        tableName,
+        ZooKeeperProtos.Table.State.ENABLING,
+        ZooKeeperProtos.Table.State.ENABLING,
+        ZooKeeperProtos.Table.State.ENABLED)) {
+        throw new TableExistsException(tableName);
+      }
+    } catch (CoordinatedStateException e) {
+      throw new IOException("Unable to ensure that the table will be" +
+        " enabling because of a ZooKeeper issue", e);
+    }
+  }
+
+  static void removeEnablingTable(final AssignmentManager assignmentManager,
+      final TableName tableName) {
+    // Try deleting the ENABLING node in case of error.
+    // If the node is not removed and the client retries the create against the
+    // same active master, the creation is blocked with a TableExistsException.
+    try {
+      assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName,
+        ZooKeeperProtos.Table.State.ENABLING, false);
+    } catch (CoordinatedStateException e) {
+      // Keeper exception should not happen here
+      LOG.error("Got a keeper exception while removing the ENABLING table znode "
+          + tableName, e);
+    }
+  }
+
   @Override
   public String toString() {
     String name = "UnknownServerName";
@@ -176,6 +228,9 @@ protected void completed(final Throwable exception) {
     releaseTableLock();
     LOG.info("Table, " + this.hTableDescriptor.getTableName() + ", creation " +
         (exception == null ? "successful" : "failed. " + exception));
+    if (exception != null) {
+      removeEnablingTable(this.assignmentManager, this.hTableDescriptor.getTableName());
+    }
   }
 
   /**
@@ -198,12 +253,9 @@ private void handleCreateTable(TableName tableName)
     FileSystem fs = fileSystemManager.getFileSystem();
 
     // 1. Create Table Descriptor
-    // using a copy of descriptor, table will be created enabling first
-    TableDescriptor underConstruction = new TableDescriptor(
-        this.hTableDescriptor, TableState.State.ENABLING);
     Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
     new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
-      tempTableDir, underConstruction, false);
+      tempTableDir, this.hTableDescriptor, false);
     Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName);
 
     // 2. Create Regions
@@ -228,18 +280,24 @@ private void handleCreateTable(TableName tableName)
       // 7. Trigger immediate assignment of the regions in round-robin fashion
       ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
     }
+
+    // Set table enabled flag up in zk.
+    try {
+      assignmentManager.getTableStateManager().setTableState(tableName,
+        ZooKeeperProtos.Table.State.ENABLED);
+    } catch (CoordinatedStateException e) {
+      throw new IOException("Unable to ensure that " + tableName + " will be" +
+        " enabled because of a ZooKeeper issue", e);
+    }
+
     // 8. Update the tabledescriptor cache.
     ((HMaster) this.server).getTableDescriptors().get(tableName);
-
-    // 9. Enable table
-    assignmentManager.getTableStateManager().setTableState(tableName,
-            TableState.State.ENABLED);
   }
 
   /**
    * Create any replicas for the regions (the default replicas that was
    * already created is passed to the method)
-   * @param hTableDescriptor descriptor to use
+   * @param hTableDescriptor
    * @param regions default replicas
    * @return the combined list of default and non-default replicas
    */
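Read together, the two helpers restored above use the ENABLING znode as a coarse lock against concurrent creates: claim the znode, do the work, publish ENABLED, and delete the znode on failure. A compressed sketch of how they compose (identifiers from the hunks; the real handler spreads this across prepare(), handleCreateTable() and completed(), and error handling is simplified here):

    void createTableGuarded(AssignmentManager am, TableName tableName,
        boolean skipTableStateCheck) throws IOException {
      // Claim the table: writes ENABLING to ZK, or throws TableExistsException
      // if another create already holds the znode (or the table is ENABLED).
      checkAndSetEnablingTable(am, tableName, skipTableStateCheck);
      try {
        // ... create the descriptor and regions on disk, add them to
        // hbase:meta, assign the regions, then publish the terminal state:
        am.getTableStateManager().setTableState(tableName,
            ZooKeeperProtos.Table.State.ENABLED);
      } catch (Exception e) {
        // Roll back the znode so a retry is not blocked by stale ENABLING state.
        removeEnablingTable(am, tableName);
        throw new IOException("Failed creating " + tableName, e);
      }
    }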
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
index e9b764e0c61a..76f603f3bc26 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
@@ -25,13 +25,13 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
@@ -39,10 +39,11 @@
 import org.apache.hadoop.hbase.master.BulkAssigner;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.htrace.Trace;
 
 /**
@@ -90,11 +91,16 @@ public DisableTableHandler prepare()
       // DISABLED or ENABLED.
       //TODO: reevaluate this since we have table locks now
       if (!skipTableStateCheck) {
-        if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
-          this.tableName, TableState.State.DISABLING,
-          TableState.State.ENABLED)) {
-          LOG.info("Table " + tableName + " isn't enabled; skipping disable");
-          throw new TableNotEnabledException(this.tableName);
+        try {
+          if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
+            this.tableName, ZooKeeperProtos.Table.State.DISABLING,
+            ZooKeeperProtos.Table.State.ENABLED)) {
+            LOG.info("Table " + tableName + " isn't enabled; skipping disable");
+            throw new TableNotEnabledException(this.tableName);
+          }
+        } catch (CoordinatedStateException e) {
+          throw new IOException("Unable to ensure that the table will be" +
+            " disabling because of a coordination engine issue", e);
         }
       }
       success = true;
@@ -133,6 +139,8 @@ public void process() {
       }
     } catch (IOException e) {
       LOG.error("Error trying to disable table " + this.tableName, e);
+    } catch (CoordinatedStateException e) {
+      LOG.error("Error trying to disable table " + this.tableName, e);
     } finally {
       releaseTableLock();
     }
@@ -148,10 +156,10 @@ private void releaseTableLock() {
     }
   }
 
-  private void handleDisableTable() throws IOException {
+  private void handleDisableTable() throws IOException, CoordinatedStateException {
     // Set table disabling flag up in zk.
     this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      TableState.State.DISABLING);
+      ZooKeeperProtos.Table.State.DISABLING);
     boolean done = false;
     while (true) {
       // Get list of online regions that are of this table.  Regions that are
@@ -180,7 +188,7 @@ private void handleDisableTable() throws IOException {
     }
     // Flip the table to disabled if success.
     if (done) this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      TableState.State.DISABLED);
+      ZooKeeperProtos.Table.State.DISABLED);
     LOG.info("Disabled table, " + this.tableName + ", is done=" + done);
   }
 
@@ -200,7 +208,7 @@ protected void populatePool(ExecutorService pool) {
       RegionStates regionStates = assignmentManager.getRegionStates();
       for (HRegionInfo region: regions) {
         if (regionStates.isRegionInTransition(region)
-            && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) {
+            && !regionStates.isRegionInState(region, State.FAILED_CLOSE)) {
           continue;
         }
         final HRegionInfo hri = region;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
index 0b914d52a946..2e6a10a7eeed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
@@ -26,15 +26,15 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -47,7 +47,7 @@
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.master.TableStateManager;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 
@@ -97,9 +97,14 @@ public EnableTableHandler prepare()
         if (!this.skipTableStateCheck) {
           throw new TableNotFoundException(tableName);
         }
-        TableStateManager tsm = assignmentManager.getTableStateManager();
-        if (tsm.isTableState(tableName, TableState.State.ENABLING)) {
-          tsm.setDeletedTable(tableName);
+        try {
+          this.assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName,
+            ZooKeeperProtos.Table.State.ENABLING, true);
+          throw new TableNotFoundException(tableName);
+        } catch (CoordinatedStateException e) {
+          // TODO : Use HBCK to clear such nodes
+          LOG.warn("Failed to delete the ENABLING node for the table " + tableName
+              + ".  The table will remain unusable. Run HBCK to manually fix the problem.");
         }
       }
 
@@ -108,11 +113,16 @@ public EnableTableHandler prepare()
       // After that, no other requests can be accepted until the table reaches
       // DISABLED or ENABLED.
       if (!skipTableStateCheck) {
-        if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
-            this.tableName, TableState.State.ENABLING,
-            TableState.State.DISABLED)) {
-          LOG.info("Table " + tableName + " isn't disabled; skipping enable");
-          throw new TableNotDisabledException(this.tableName);
+        try {
+          if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
+              this.tableName, ZooKeeperProtos.Table.State.ENABLING,
+              ZooKeeperProtos.Table.State.DISABLED)) {
+            LOG.info("Table " + tableName + " isn't disabled; skipping enable");
+            throw new TableNotDisabledException(this.tableName);
+          }
+        } catch (CoordinatedStateException e) {
+          throw new IOException("Unable to ensure that the table will be" +
+            " enabling because of a coordination engine issue", e);
         }
       }
       success = true;
@@ -148,7 +158,11 @@ public void process() {
       if (cpHost != null) {
         cpHost.postEnableTableHandler(this.tableName, null);
       }
-    } catch (IOException | InterruptedException e) {
+    } catch (IOException e) {
+      LOG.error("Error trying to enable the table " + this.tableName, e);
+    } catch (CoordinatedStateException e) {
+      LOG.error("Error trying to enable the table " + this.tableName, e);
+    } catch (InterruptedException e) {
       LOG.error("Error trying to enable the table " + this.tableName, e);
     } finally {
       releaseTableLock();
@@ -165,13 +179,14 @@ private void releaseTableLock() {
     }
   }
 
-  private void handleEnableTable() throws IOException,
+  private void handleEnableTable() throws IOException, CoordinatedStateException,
       InterruptedException {
     // I could check table is disabling and if so, not enable but require
     // that user first finish disabling but that might be obnoxious.
 
+    // Set table enabling flag up in zk.
     this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      TableState.State.ENABLING);
+      ZooKeeperProtos.Table.State.ENABLING);
     boolean done = false;
     ServerManager serverManager = ((HMaster)this.server).getServerManager();
     // Get the regions of this table. We're done when all listed
@@ -236,7 +251,7 @@ private void handleEnableTable() throws IOException,
     if (done) {
       // Flip the table to enabled.
       this.assignmentManager.getTableStateManager().setTableState(
-        this.tableName, TableState.State.ENABLED);
+        this.tableName, ZooKeeperProtos.Table.State.ENABLED);
       LOG.info("Table '" + this.tableName
       + "' was successfully enabled. Status: done=" + done);
     } else {
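The disable and enable handlers above are mirror images of one ZK protocol: a compare-and-set into the transitional state, the per-region work, then the terminal state. Reduced to its transitions (identifiers from the hunks; tsm is the assignment manager's TableStateManager, region handling and CoordinatedStateException paths elided):

    // Disable: ENABLED -> DISABLING -> DISABLED
    if (tsm.setTableStateIfInStates(tableName,
        ZooKeeperProtos.Table.State.DISABLING,    // new state
        ZooKeeperProtos.Table.State.ENABLED)) {   // required current state
      // ... unassign every region of the table ...
      tsm.setTableState(tableName, ZooKeeperProtos.Table.State.DISABLED);
    }

    // Enable: DISABLED -> ENABLING -> ENABLED
    if (tsm.setTableStateIfInStates(tableName,
        ZooKeeperProtos.Table.State.ENABLING,
        ZooKeeperProtos.Table.State.DISABLED)) {
      // ... assign every region of the table ...
      tsm.setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED);
    }

The compare-and-set is what makes concurrent requests safe: only the caller that wins the transition proceeds, and everyone else sees TableNotEnabledException or TableNotDisabledException.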
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index 0081f16816f7..43a0f65be9a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -29,8 +29,6 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.TableDescriptor;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -38,15 +36,16 @@
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.BulkReOpen;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -138,7 +137,7 @@ public void process() {
       handleTableOperation(hris);
       if (eventType.isOnlineSchemaChangeSupported() && this.masterServices.
           getAssignmentManager().getTableStateManager().isTableState(
-          tableName, TableState.State.ENABLED)) {
+          tableName, ZooKeeperProtos.Table.State.ENABLED)) {
         if (reOpenAllRegions(hris)) {
           LOG.info("Completed table operation " + eventType + " on table " +
               tableName);
@@ -237,10 +236,10 @@ public boolean reOpenAllRegions(List<HRegionInfo> regions) throws IOException {
    * @throws FileNotFoundException
    * @throws IOException
    */
-  public TableDescriptor getTableDescriptor()
+  public HTableDescriptor getTableDescriptor()
   throws FileNotFoundException, IOException {
-    TableDescriptor htd =
-      this.masterServices.getTableDescriptors().getDescriptor(tableName);
+    HTableDescriptor htd =
+      this.masterServices.getTableDescriptors().get(tableName);
     if (htd == null) {
       throw new IOException("HTableDescriptor missing for " + tableName);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
index c9df56e404ce..a3dc1a49521a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
@@ -32,12 +32,12 @@
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 
 /**
@@ -336,7 +336,7 @@ private void postAdd(final MasterProcedureEnv env, final AddColumnFamilyState st
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only runs when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), TableState.State.ENABLED)) {
+        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index a8459f16bb87..152af450a79d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -33,21 +33,20 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -300,8 +299,8 @@ private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
         !(env.getMasterServices().isInitialized()) && tableName.isSystemTable();
     if (!skipTableStateCheck) {
       TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager();
-      if (tsm.isTableState(tableName, TableState.State.ENABLING,
-          TableState.State.ENABLED)) {
+      if (tsm.isTableState(tableName, true, ZooKeeperProtos.Table.State.ENABLING,
+          ZooKeeperProtos.Table.State.ENABLED)) {
         LOG.warn("The table " + tableName + " does not exist in meta but has a znode. " +
                "run hbck to fix inconsistencies.");
         setFailure("master-create-table", new TableExistsException(getTableName()));
@@ -376,7 +375,7 @@ protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
     // using a copy of descriptor, table will be created enabling first
     final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
     new FSTableDescriptors(env.getMasterConfiguration()).createTableDescriptorForTableDirectory(
-      tempTableDir, new TableDescriptor(hTableDescriptor), false);
+      tempTableDir, hTableDescriptor, false);
 
     // 2. Create Regions
     newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
@@ -449,14 +448,14 @@ protected static void assignRegions(final MasterProcedureEnv env,
 
     // Mark the table as Enabling
     assignmentManager.getTableStateManager().setTableState(tableName,
-        TableState.State.ENABLING);
+        ZooKeeperProtos.Table.State.ENABLING);
 
     // Trigger immediate assignment of the regions in round-robin fashion
     ModifyRegionUtils.assignRegions(assignmentManager, regions);
 
     // Enable table
     assignmentManager.getTableStateManager()
-      .setTableState(tableName, TableState.State.ENABLED);
+      .setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED);
   }
 
   /**
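The procedure-based create path performs the same orphan-znode check before creating anything on disk. A condensed sketch (identifiers from the hunk; reading the extra boolean passed to isTableState as "consult ZooKeeper directly rather than the cached state" is an assumption about the restored ZK-based manager, not something this patch states):

    // Condensed from prepareCreate(): fail fast if the table exists in meta,
    // or if a znode left over from a failed DDL still marks it ENABLING/ENABLED.
    if (MetaTableAccessor.tableExists(
        env.getMasterServices().getConnection(), tableName)) {
      setFailure("master-create-table", new TableExistsException(tableName));
      return false;
    }
    TableStateManager tsm =
        env.getMasterServices().getAssignmentManager().getTableStateManager();
    if (tsm.isTableState(tableName, true,
        ZooKeeperProtos.Table.State.ENABLING,
        ZooKeeperProtos.Table.State.ENABLED)) {
      // Orphan znode: surface the inconsistency instead of silently recreating.
      setFailure("master-create-table", new TableExistsException(tableName));
      return false;
    }
    return true;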
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
index 3e6568bec1c2..5b1a69c0ece3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
@@ -31,12 +31,12 @@
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -357,7 +357,7 @@ private void postDelete(final MasterProcedureEnv env, final DeleteColumnFamilySt
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only runs when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), TableState.State.ENABLED)) {
+        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index 7fe2a8973e9d..bec599cbb426 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -32,8 +32,8 @@
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -41,11 +41,11 @@
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionStates;
-import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.htrace.Trace;
@@ -286,8 +286,8 @@ private boolean prepareDisable(final MasterProcedureEnv env) throws HBaseExcepti
       // this issue.
       TableStateManager tsm =
         env.getMasterServices().getAssignmentManager().getTableStateManager();
-      if (!tsm.setTableStateIfInStates(tableName, TableState.State.DISABLING,
-            TableState.State.DISABLING, TableState.State.ENABLED)) {
+      if (!tsm.setTableStateIfInStates(tableName, ZooKeeperProtos.Table.State.DISABLING,
+            ZooKeeperProtos.Table.State.DISABLING, ZooKeeperProtos.Table.State.ENABLED)) {
         LOG.info("Table " + tableName + " isn't enabled; skipping disable");
         setFailure("master-disable-table", new TableNotEnabledException(tableName));
         canTableBeDisabled = false;
@@ -311,7 +311,7 @@ private void undoTableStateChange(final MasterProcedureEnv env) {
       try {
         // If the state was changed, undo it.
         if (env.getMasterServices().getAssignmentManager().getTableStateManager().isTableState(
-            tableName, TableState.State.DISABLING)) {
+            tableName, ZooKeeperProtos.Table.State.DISABLING)) {
           EnableTableProcedure.setTableStateToEnabled(env, tableName);
         }
       } catch (Exception e) {
@@ -344,7 +344,7 @@ protected static void setTableStateToDisabling(
     // Set table disabling flag up in zk.
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
       tableName,
-      TableState.State.DISABLING);
+      ZooKeeperProtos.Table.State.DISABLING);
   }
 
   /**
@@ -435,7 +435,7 @@ protected static void setTableStateToDisabled(
     // Flip the table to disabled
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
       tableName,
-      TableState.State.DISABLED);
+      ZooKeeperProtos.Table.State.DISABLED);
     LOG.info("Disabled table, " + tableName + ", is completed.");
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
index c06bb07a5079..f4a45388a569 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
@@ -35,8 +35,8 @@
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.BulkAssigner;
@@ -45,11 +45,11 @@
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -307,7 +307,7 @@ private boolean prepareEnable(final MasterProcedureEnv env) throws IOException {
       // was implemented. With table lock, there is no need to set the state here (it will
       // set the state later on). A quick state check should be enough for us to move forward.
       TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager();
-      if (!tsm.isTableState(tableName, TableState.State.DISABLED)) {
+      if (!tsm.isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
         LOG.info("Table " + tableName + " isn't disabled; skipping enable");
         setFailure("master-enable-table", new TableNotDisabledException(this.tableName));
         canTableBeEnabled = false;
@@ -344,7 +344,8 @@ protected static void setTableStateToEnabling(
     // Set table enabling flag up in zk.
     LOG.info("Attempting to enable the table " + tableName);
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
-      tableName, TableState.State.ENABLING);
+      tableName,
+      ZooKeeperProtos.Table.State.ENABLING);
   }
 
   /**
@@ -489,7 +490,8 @@ protected static void setTableStateToEnabled(
       final TableName tableName) throws HBaseException, IOException {
     // Flip the table to Enabled
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
-      tableName, TableState.State.ENABLED);
+      tableName,
+      ZooKeeperProtos.Table.State.ENABLED);
     LOG.info("Table '" + tableName + "' was successfully enabled.");
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
index c6ff1b6e3e8e..2e8499f61dd0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
@@ -37,7 +37,7 @@
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.BulkReOpen;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -78,7 +78,7 @@ public static void checkTableModifiable(final MasterProcedureEnv env, final Tabl
 
     // We only execute this procedure with the table online if the online schema change config is set.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(tableName, TableState.State.DISABLED)
+        .isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)
         && !MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
       throw new TableNotDisabledException(tableName);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
index 590e4ceb432e..5a6b59229f0a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
@@ -32,12 +32,12 @@
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 
 /**
@@ -316,7 +316,7 @@ private void postModify(final MasterProcedureEnv env, final ModifyColumnFamilySt
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only runs when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), TableState.State.ENABLED)) {
+        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index fa9746f62aa4..e78568475b1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -42,11 +42,11 @@
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 
@@ -294,7 +294,7 @@ private void prepareModify(final MasterProcedureEnv env) throws IOException {
         env.getMasterServices().getTableDescriptors().get(getTableName());
 
     if (env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), TableState.State.ENABLED)) {
+        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
       // We only execute this procedure with the table online if the online schema change config is set.
       if (!MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
         throw new TableNotDisabledException(getTableName());
@@ -432,7 +432,7 @@ private void postModify(final MasterProcedureEnv env, final ModifyTableState sta
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only runs when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), TableState.State.ENABLED)) {
+        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index ef04cfe2d1e5..b6e7a7c97448 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -37,7 +37,6 @@
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -49,6 +48,7 @@
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ServerCrashState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -526,7 +526,7 @@ private List<HRegionInfo> calcRegionsToAssign(final MasterProcedureEnv env)
           } else if (rit != null) {
             if ((rit.isPendingCloseOrClosing() || rit.isOffline())
                 && am.getTableStateManager().isTableState(hri.getTable(),
-                TableState.State.DISABLED, TableState.State.DISABLING) ||
+                ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
                 am.getReplicasToClose().contains(hri)) {
               // If the table was partially disabled and the RS went down, we should clear the
               // RIT and remove the node for the region.
@@ -713,7 +713,7 @@ private static boolean processDeadRegion(HRegionInfo hri, AssignmentManager assi
     }
     // If table is not disabled but the region is offlined,
     boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
-      TableState.State.DISABLED);
+      ZooKeeperProtos.Table.State.DISABLED);
     if (disabled){
       LOG.info("The table " + hri.getTable() + " was disabled.  Hence not proceeding.");
       return false;
@@ -725,7 +725,7 @@ private static boolean processDeadRegion(HRegionInfo hri, AssignmentManager assi
       return false;
     }
     boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
-      TableState.State.DISABLING);
+      ZooKeeperProtos.Table.State.DISABLING);
     if (disabling) {
       LOG.info("The table " + hri.getTable() + " is disabled.  Hence not assigning region" +
         hri.getEncodedName());
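After the revert, server-crash handling consults the ZK table state before re-assigning a dead server's regions: tables that are DISABLED or DISABLING keep their regions offline. Condensed from the two processDeadRegion() hunks above:

    // Skip reassignment when the table is DISABLED, and likewise when a
    // disable is still in flight (DISABLING).
    TableStateManager tsm = assignmentManager.getTableStateManager();
    if (tsm.isTableState(hri.getTable(), ZooKeeperProtos.Table.State.DISABLED)
        || tsm.isTableState(hri.getTable(),
               ZooKeeperProtos.Table.State.DISABLING)) {
      return false;  // leave the region offline
    }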
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index f9990f58463e..cd416681b0d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -50,7 +50,6 @@
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -71,6 +70,7 @@
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
@@ -620,7 +620,7 @@ private void takeSnapshotInternal(SnapshotDescription snapshot) throws IOExcepti
     TableName snapshotTable = TableName.valueOf(snapshot.getTable());
     AssignmentManager assignmentMgr = master.getAssignmentManager();
     if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
-      TableState.State.ENABLED)) {
+        ZooKeeperProtos.Table.State.ENABLED)) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Table enabled, starting distributed snapshot for "
             + ClientSnapshotDescriptionUtils.toString(snapshot));
@@ -632,7 +632,7 @@ private void takeSnapshotInternal(SnapshotDescription snapshot) throws IOExcepti
     }
     // For disabled table, snapshot is created by the master
     else if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
-        TableState.State.DISABLED)) {
+        ZooKeeperProtos.Table.State.DISABLED)) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Table is disabled, running snapshot entirely on master "
             + ClientSnapshotDescriptionUtils.toString(snapshot));
@@ -799,7 +799,7 @@ public void restoreSnapshot(SnapshotDescription reqSnapshot, boolean restoreAcl)
     // Execute the restore/clone operation
     if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) {
       if (master.getAssignmentManager().getTableStateManager().isTableState(
-          TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) {
+          TableName.valueOf(snapshot.getTable()), ZooKeeperProtos.Table.State.ENABLED)) {
         throw new UnsupportedOperationException("Table '" +
             TableName.valueOf(snapshot.getTable()) + "' must be disabled in order to " +
             "perform a restore operation" +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index 8a1c11a0abb3..6da05cdd0e37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -39,14 +39,12 @@
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -375,7 +373,7 @@ public void migrateACL() throws IOException {
       HTableDescriptor newDesc = new HTableDescriptor(oldDesc);
       newDesc.setName(newTableName);
       new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
-        newTablePath, new TableDescriptor(newDesc, TableState.State.ENABLED), true);
+        newTablePath, newDesc, true);
     }
 
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index 37528b2a11f0..0b483d9d30f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -34,7 +34,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
@@ -112,14 +111,13 @@ public void compact(final Path path, final boolean compactOnce, final boolean ma
       if (isFamilyDir(fs, path)) {
         Path regionDir = path.getParent();
         Path tableDir = regionDir.getParent();
-        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
         HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-        compactStoreFiles(tableDir, htd.getHTableDescriptor(), hri,
-            path.getName(), compactOnce, major);
+        compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major);
       } else if (isRegionDir(fs, path)) {
         Path tableDir = path.getParent();
-        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-        compactRegion(tableDir, htd.getHTableDescriptor(), path, compactOnce, major);
+        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        compactRegion(tableDir, htd, path, compactOnce, major);
       } else if (isTableDir(fs, path)) {
         compactTable(path, compactOnce, major);
       } else {
@@ -130,9 +128,9 @@ public void compact(final Path path, final boolean compactOnce, final boolean ma
 
     private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
         throws IOException {
-      TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+      HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
       for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
-        compactRegion(tableDir, htd.getHTableDescriptor(), regionDir, compactOnce, major);
+        compactRegion(tableDir, htd, regionDir, compactOnce, major);
       }
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
index 19bb71b63fa4..d885577135b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
@@ -91,7 +91,6 @@ public static Class getWALCellCodecClass(Configuration conf) {
    * Fully prepares the codec for use.
    * @param conf {@link Configuration} to read for the user-specified codec. If none is specified,
    *          uses a {@link WALCellCodec}.
-   * @param cellCodecClsName name of codec
    * @param compression compression the codec should use
    * @return a {@link WALCellCodec} ready for use.
    * @throws UnsupportedOperationException if the codec cannot be instantiated
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index f8e05b2261aa..dfb4f0fd3a38 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -42,9 +42,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -304,8 +302,7 @@ protected void addRegion(final Path tableDir, final HRegionInfo regionInfo, Regi
   private void load() throws IOException {
     switch (getSnapshotFormat(desc)) {
       case SnapshotManifestV1.DESCRIPTOR_VERSION: {
-        this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir)
-            .getHTableDescriptor();
+        this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir);
         ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
         try {
           this.regionManifests =
@@ -410,8 +407,7 @@ public void consolidate() throws IOException {
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
       new FSTableDescriptors(conf, workingDirFs, rootDir)
-        .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor(
-            htd, TableState.State.ENABLED), false);
+        .createTableDescriptorForTableDirectory(workingDir, htd, false);
     } else {
       LOG.debug("Convert to Single Snapshot Manifest for " + this.desc.getName());
       convertToV2SingleManifest();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index b80de5d4466c..b0592cfb7a26 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -38,9 +38,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -86,10 +84,15 @@ public class FSTableDescriptors implements TableDescriptors {
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
-  private final Map<TableName, TableDescriptor> cache =
-    new ConcurrentHashMap<TableName, TableDescriptor>();
+  private final Map<TableName, HTableDescriptor> cache =
+    new ConcurrentHashMap<TableName, HTableDescriptor>();
 
   /**
+   * Table descriptor for hbase:meta catalog table
+   */
+   private final HTableDescriptor metaTableDescriptor;
+
+   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
    * conf and the filesystem where that root dir lives.
    * This instance can do write operations (is not read only).
@@ -114,6 +117,7 @@ public FSTableDescriptors(final Configuration conf, final FileSystem fs,
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
+    this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf);
   }
 
   @Override
@@ -139,12 +143,12 @@ public boolean isUsecache() {
    * to see if a newer file has been created since the cached one was read.
    */
   @Override
-  public TableDescriptor getDescriptor(final TableName tablename)
+  public HTableDescriptor get(final TableName tablename)
   throws IOException {
     invocations++;
     if (TableName.META_TABLE_NAME.equals(tablename)) {
       cachehits++;
-      return new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED);
+      return metaTableDescriptor;
     }
     // hbase:meta is already handled. If someone tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
@@ -154,100 +158,73 @@ public TableDescriptor getDescriptor(final TableName tablename)
 
     if (usecache) {
       // Look in cache of descriptors.
-      TableDescriptor cachedtdm = this.cache.get(tablename);
+      HTableDescriptor cachedtdm = this.cache.get(tablename);
       if (cachedtdm != null) {
         cachehits++;
         return cachedtdm;
       }
     }
-    TableDescriptor tdmt = null;
+    HTableDescriptor tdmt = null;
     try {
-      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
+      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
+    } catch (NullPointerException e) {
+      LOG.debug("Exception during readTableDecriptor. Current table name = "
+          + tablename, e);
     } catch (TableInfoMissingException e) {
       // ignore. This is regular operation
-    } catch (NullPointerException | IOException e) {
+    } catch (IOException ioe) {
       LOG.debug("Exception during readTableDecriptor. Current table name = "
-          + tablename, e);
+          + tablename, ioe);
     }
     // last HTD written wins
     if (usecache && tdmt != null) {
       this.cache.put(tablename, tdmt);
     }
-    return tdmt;
-  }
 
-  /**
-   * Get the current table descriptor for the given table, or null if none exists.
-   *
-   * Uses a local cache of the descriptor but still checks the filesystem on each call
-   * to see if a newer file has been created since the cached one was read.
-   */
-  @Override
-  public HTableDescriptor get(TableName tableName) throws IOException {
-    if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tableName)) {
-      cachehits++;
-      return HTableDescriptor.META_TABLEDESC;
-    }
-    TableDescriptor descriptor = getDescriptor(tableName);
-    return descriptor == null ? null : descriptor.getHTableDescriptor();
+    return tdmt;
   }
 
   /**
    * Returns a map from table name to table descriptor for all tables.
    */
   @Override
-  public Map<String, TableDescriptor> getAllDescriptors()
+  public Map<String, HTableDescriptor> getAll()
   throws IOException {
-    Map<String, TableDescriptor> tds = new TreeMap<String, TableDescriptor>();
+    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
 
     if (fsvisited && usecache) {
-      for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
-        tds.put(entry.getKey().toString(), entry.getValue());
+      for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
+        htds.put(entry.getKey().toString(), entry.getValue());
       }
       // add hbase:meta to the response
-      tds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(),
-          new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED));
+      htds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(),
+        HTableDescriptor.META_TABLEDESC);
     } else {
       LOG.debug("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
       for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
-        TableDescriptor td = null;
+        HTableDescriptor htd = null;
         try {
-          td = getDescriptor(FSUtils.getTableName(d));
+          htd = get(FSUtils.getTableName(d));
         } catch (FileNotFoundException fnfe) {
           // inability of retrieving one HTD shouldn't stop getting the remaining
           LOG.warn("Trouble retrieving htd", fnfe);
         }
-        if (td == null) {
+        if (htd == null) {
           allvisited = false;
           continue;
         } else {
-          tds.put(td.getHTableDescriptor().getTableName().getNameAsString(), td);
+          htds.put(htd.getTableName().getNameAsString(), htd);
         }
         fsvisited = allvisited;
       }
     }
-    return tds;
-  }
-
-  /**
-   * Returns a map from table name to table descriptor for all tables.
-   */
-  @Override
-  public Map<String, HTableDescriptor> getAll() throws IOException {
-    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
-    Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
-    for (Map.Entry<String, TableDescriptor> entry : allDescriptors
-        .entrySet()) {
-      htds.put(entry.getKey(), entry.getValue().getHTableDescriptor());
-    }
     return htds;
   }
 
-  /**
-    * Find descriptors by namespace.
-    * @see #get(org.apache.hadoop.hbase.TableName)
-    */
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
+   */
   @Override
   public Map<String, HTableDescriptor> getByNamespace(String name)
   throws IOException {
@@ -268,27 +245,6 @@ public Map<String, HTableDescriptor> getByNamespace(String name)
     return htds;
   }
 
-  /**
-   * Adds (or updates) the table descriptor to the FileSystem
-   * and updates the local cache with it.
-   */
-  @Override
-  public void add(TableDescriptor htd) throws IOException {
-    if (fsreadonly) {
-      throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
-    }
-    TableName tableName = htd.getHTableDescriptor().getTableName();
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      throw new NotImplementedException();
-    }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
-      throw new NotImplementedException(
-        "Cannot add a table descriptor for a reserved subdirectory name: "
-            + htd.getHTableDescriptor().getNameAsString());
-    }
-    updateTableDescriptor(htd);
-  }
-
   /**
    * Adds (or updates) the table descriptor to the FileSystem
    * and updates the local cache with it.
@@ -298,23 +254,14 @@ public void add(HTableDescriptor htd) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
-    TableName tableName = htd.getTableName();
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
+    if (TableName.META_TABLE_NAME.equals(htd.getTableName())) {
       throw new NotImplementedException();
     }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) {
       throw new NotImplementedException(
-          "Cannot add a table descriptor for a reserved subdirectory name: "
-              + htd.getNameAsString());
+        "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString());
     }
-    TableDescriptor descriptor = getDescriptor(htd.getTableName());
-    if (descriptor == null) {
-      descriptor = new TableDescriptor(htd);
-    }
-    else {
-      descriptor.setHTableDescriptor(htd);
-    }
-    updateTableDescriptor(descriptor);
+    updateTableDescriptor(htd);
   }
 
   /**
@@ -334,11 +281,11 @@ public HTableDescriptor remove(final TableName tablename)
         throw new IOException("Failed delete of " + tabledir.toString());
       }
     }
-    TableDescriptor descriptor = this.cache.remove(tablename);
+    HTableDescriptor descriptor = this.cache.remove(tablename);
     if (descriptor == null) {
       return null;
     } else {
-      return descriptor.getHTableDescriptor();
+      return descriptor;
     }
   }
 
@@ -521,8 +468,8 @@ static String getTableInfoFileName(final int sequenceid) {
    * if it exists, bypassing the local cache.
    * Returns null if it's not found.
    */
-  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
-      Path hbaseRootDir, TableName tableName) throws IOException {
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
+    Path hbaseRootDir, TableName tableName) throws IOException {
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     return getTableDescriptorFromFs(fs, tableDir);
   }
@@ -532,16 +479,37 @@ public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
    * directly from the file system if it exists.
    * @throws TableInfoMissingException if there is no descriptor
    */
-  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
+    Path hbaseRootDir, TableName tableName, boolean rewritePb) throws IOException {
+    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
+    return getTableDescriptorFromFs(fs, tableDir, rewritePb);
+  }
+  /**
+   * Returns the latest table descriptor for the table located at the given directory
+   * directly from the file system if it exists.
+   * @throws TableInfoMissingException if there is no descriptor
+   */
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
     throws IOException {
+    return getTableDescriptorFromFs(fs, tableDir, false);
+  }
+
+  /**
+   * Returns the latest table descriptor for the table located at the given directory
+   * directly from the file system if it exists.
+   * @throws TableInfoMissingException if there is no descriptor
+   */
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
+    boolean rewritePb)
+  throws IOException {
     FileStatus status = getTableInfoPath(fs, tableDir, false);
     if (status == null) {
       throw new TableInfoMissingException("No table descriptor file under " + tableDir);
     }
-    return readTableDescriptor(fs, status, false);
+    return readTableDescriptor(fs, status, rewritePb);
   }
 
-  private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
+  private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
       boolean rewritePb) throws IOException {
     int len = Ints.checkedCast(status.getLen());
     byte [] content = new byte[len];
@@ -551,32 +519,30 @@ private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus sta
     } finally {
       fsDataInputStream.close();
     }
-    TableDescriptor td = null;
+    HTableDescriptor htd = null;
     try {
-      td = TableDescriptor.parseFrom(content);
+      htd = HTableDescriptor.parseFrom(content);
     } catch (DeserializationException e) {
       // we have old HTableDescriptor here
       try {
         HTableDescriptor ohtd = HTableDescriptor.parseFrom(content);
         LOG.warn("Found old table descriptor, converting to new format for table " +
           ohtd.getTableName());
-        td = new TableDescriptor(ohtd);
-        if (rewritePb) {
-          rewriteTableDescriptor(fs, status, td);
-        }
+        htd = new HTableDescriptor(ohtd);
+        if (rewritePb) rewriteTableDescriptor(fs, status, htd);
       } catch (DeserializationException e1) {
         throw new IOException("content=" + Bytes.toShort(content), e1);
       }
     }
     if (rewritePb && !ProtobufUtil.isPBMagicPrefix(content)) {
       // Convert the file over to be pb before leaving here.
-      rewriteTableDescriptor(fs, status, td);
+      rewriteTableDescriptor(fs, status, htd);
     }
-    return td;
+    return htd;
   }
 
   private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus status,
-    final TableDescriptor td)
+    final HTableDescriptor td)
   throws IOException {
     Path tableInfoDir = status.getPath().getParent();
     Path tableDir = tableInfoDir.getParent();
@@ -588,18 +554,16 @@ private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus
    * @throws IOException Thrown if failed update.
    * @throws NotImplementedException if in read only mode
    */
-  Path updateTableDescriptor(TableDescriptor td)
-  throws IOException {
+  Path updateTableDescriptor(HTableDescriptor htd) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
     }
-    TableName tableName = td.getHTableDescriptor().getTableName();
-    Path tableDir = getTableDir(tableName);
-    Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
+    Path tableDir = getTableDir(htd.getTableName());
+    Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir));
     if (p == null) throw new IOException("Failed update");
     LOG.info("Updated tableinfo=" + p);
     if (usecache) {
-      this.cache.put(td.getHTableDescriptor().getTableName(), td);
+      this.cache.put(htd.getTableName(), htd);
     }
     return p;
   }
@@ -650,8 +614,9 @@ private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxS
    * @return Descriptor file or null if we failed write.
    */
   private static Path writeTableDescriptor(final FileSystem fs,
-    final TableDescriptor htd, final Path tableDir,
-    final FileStatus currentDescriptorFile) throws IOException {
+    final HTableDescriptor htd, final Path tableDir,
+    final FileStatus currentDescriptorFile)
+  throws IOException {
     // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
     // This directory is never removed to avoid removing it out from under a concurrent writer.
     Path tmpTableDir = new Path(tableDir, TMP_DIR);
@@ -680,7 +645,7 @@ private static Path writeTableDescriptor(final FileSystem fs,
       }
       tableInfoDirPath = new Path(tableInfoDir, filename);
       try {
-        writeTD(fs, tempPath, htd);
+        writeHTD(fs, tempPath, htd);
         fs.mkdirs(tableInfoDirPath.getParent());
         if (!fs.rename(tempPath, tableInfoDirPath)) {
           throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath);
@@ -704,7 +669,7 @@ private static Path writeTableDescriptor(final FileSystem fs,
     return tableInfoDirPath;
   }
 
-  private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
+  private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
   throws IOException {
     FSDataOutputStream out = fs.create(p, false);
     try {
@@ -716,22 +681,13 @@ private static void writeTD(final FileSystem fs, final Path p, final TableDescri
     }
   }
 
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   * Used by tests.
-   * @return True if we successfully created file.
-   */
-  public boolean createTableDescriptor(TableDescriptor htd) throws IOException {
-    return createTableDescriptor(htd, false);
-  }
-
   /**
    * Create new HTableDescriptor in HDFS. Happens when we are creating table.
    * Used by tests.
    * @return True if we successfully created file.
    */
   public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
-    return createTableDescriptor(new TableDescriptor(htd), false);
+    return createTableDescriptor(htd, false);
   }
 
   /**
@@ -741,9 +697,9 @@ public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
    *
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
+  public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
   throws IOException {
-    Path tableDir = getTableDir(htd.getHTableDescriptor().getTableName());
+    Path tableDir = getTableDir(htd.getTableName());
     return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
   }
 
@@ -759,7 +715,7 @@ public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
    * @throws IOException if a filesystem error occurs
    */
   public boolean createTableDescriptorForTableDirectory(Path tableDir,
-      TableDescriptor htd, boolean forceCreation) throws IOException {
+      HTableDescriptor htd, boolean forceCreation) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
     }
@@ -780,3 +736,4 @@ public boolean createTableDescriptorForTableDirectory(Path tableDir,
   }
 
 }
+
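Taken together, the reverted FSTableDescriptors surface reads as below. A usage sketch under stated assumptions: a reachable HBase root dir in the Configuration and an existing user table; the class and table names are placeholders.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class FsTableDescriptorsUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FSTableDescriptors fstd = new FSTableDescriptors(conf);
    // hbase:meta is answered from the precomputed descriptor; other tables hit
    // the cache first and re-check the filesystem for a newer tableinfo file.
    HTableDescriptor htd = fstd.get(TableName.valueOf("example_table"));
    // getAll() again maps table name to HTableDescriptor, wrapper-free.
    Map<String, HTableDescriptor> all = fstd.getAll();
    System.out.println(htd.getTableName() + " is one of " + all.size() + " tables");
  }
}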
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 6ed2b3e75d80..6cb3d204a6b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -71,6 +71,7 @@
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -83,7 +84,6 @@
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -107,13 +107,13 @@
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.FileLink;
 import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -128,6 +128,9 @@
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
 import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
 import org.apache.hadoop.hbase.wal.WALSplitter;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
+import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -1334,9 +1337,9 @@ private SortedMap<TableName, TableInfo> loadHdfsRegionInfos()
         modTInfo = new TableInfo(tableName);
         tablesInfo.put(tableName, modTInfo);
         try {
-          TableDescriptor htd =
+          HTableDescriptor htd =
               FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
-          modTInfo.htds.add(htd.getHTableDescriptor());
+          modTInfo.htds.add(htd);
         } catch (IOException ioe) {
           if (!orphanTableDirs.containsKey(tableName)) {
             LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe);
@@ -1391,7 +1394,7 @@ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
     for (String columnfamimly : columns) {
       htd.addFamily(new HColumnDescriptor(columnfamimly));
     }
-    fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true);
+    fstd.createTableDescriptor(htd, true);
     return true;
   }
 
@@ -1439,7 +1442,7 @@ public void fixOrphanTables() throws IOException {
           if (tableName.equals(htds[j].getTableName())) {
             HTableDescriptor htd = htds[j];
             LOG.info("fixing orphan table: " + tableName + " from cache");
-            fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true);
+            fstd.createTableDescriptor(htd, true);
             j++;
             iter.remove();
           }
@@ -1799,16 +1802,19 @@ Path sidelineOldMeta() throws IOException {
    * @throws IOException
    */
   private void loadDisabledTables()
-  throws IOException {
+  throws ZooKeeperConnectionException, IOException {
     HConnectionManager.execute(new HConnectable<Void>(getConf()) {
       @Override
       public Void connect(HConnection connection) throws IOException {
-        TableName[] tables = connection.listTableNames();
-        for (TableName table : tables) {
-          if (connection.getTableState(table)
-              .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
-            disabledTables.add(table);
+        try {
+          for (TableName tableName :
+              ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw)) {
+            disabledTables.add(tableName);
           }
+        } catch (KeeperException ke) {
+          throw new IOException(ke);
+        } catch (InterruptedException e) {
+          throw new InterruptedIOException();
         }
         return null;
       }
@@ -3540,15 +3546,12 @@ private void checkAndFixReplication() throws IOException {
   /**
    * Check whether an orphaned table ZNode exists and fix it if requested.
    * @throws IOException
+   * @throws KeeperException
+   * @throws InterruptedException
    */
   private void checkAndFixOrphanedTableZNodes()
-      throws IOException {
-    Set<TableName> enablingTables = new HashSet<>();
-    for (TableName tableName: admin.listTableNames()) {
-      if (connection.getTableState(tableName).getState().equals(TableState.State.ENABLING)) {
-        enablingTables.add(tableName);
-      }
-    }
+      throws IOException, KeeperException, InterruptedException {
+    Set<TableName> enablingTables = ZKTableStateClientSideReader.getEnablingTables(zkw);
     String msg;
     TableInfo tableInfo;
 
@@ -3567,12 +3570,21 @@ private void checkAndFixOrphanedTableZNodes()
     }
 
     if (orphanedTableZNodes.size() > 0 && this.fixTableZNodes) {
+      ZKTableStateManager zkTableStateMgr = new ZKTableStateManager(zkw);
+
       for (TableName tableName : orphanedTableZNodes) {
-        // Set the table state to be disabled so that if we made mistake, we can trace
-        // the history and figure it out.
-        // Another choice is to call checkAndRemoveTableState() to delete the orphaned ZNode.
-        // Both approaches works.
-        admin.disableTable(tableName);
+        try {
+          // Set the table state to be disabled so that if we made a mistake, we can trace
+          // the history and figure it out.
+          // Another choice is to call checkAndRemoveTableState() to delete the orphaned ZNode.
+          // Both approaches work.
+          zkTableStateMgr.setTableState(tableName, ZooKeeperProtos.Table.State.DISABLED);
+        } catch (CoordinatedStateException e) {
+          // This exception should not happen here
+          LOG.error(
+            "Got a CoordinatedStateException while fixing the ENABLING table znode " + tableName,
+            e);
+        }
       }
     }
   }
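hbck's state reads above go back to ZooKeeper. A sketch of that client-side path, assuming a reachable quorum in the configuration; the class name and watcher identifier are illustrative, and the null Abortable mirrors other ad-hoc tooling.

import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class DisabledTablesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "disabled-tables-sketch", null);
    try {
      // Same reader hbck uses in loadDisabledTables() above.
      Set<TableName> disabled =
          ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw);
      System.out.println("Disabled or disabling tables: " + disabled);
    } finally {
      zkw.close();
    }
  }
}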
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 02b598066884..7f2c85db060d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -155,8 +155,7 @@ protected Merger(Configuration conf, FileSystem fs, final TableName tableName)
 
       this.rootDir = FSUtils.getRootDir(conf);
       Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
-      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir)
-          .getHTableDescriptor();
+      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
       String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
 
       final Configuration walConf = new Configuration(conf);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
index 1530d2845dc9..adab20351284 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -29,7 +29,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -154,9 +153,9 @@ private void mergeTwoRegions() throws IOException {
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta);
     }
-    TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
+    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
       this.rootdir, this.tableName);
-    HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2);
+    HRegion merged = merge(htd, meta, info1, info2);
 
     LOG.info("Adding " + merged.getRegionInfo() + " to " +
         meta.getRegionInfo());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
index 82308be78b29..57ec87d8e48e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
@@ -18,11 +18,8 @@
 package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
-import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -30,9 +27,6 @@
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
@@ -159,9 +153,8 @@ private void checkAndMigrateTableStatesToPB(ZooKeeperWatcher zkw) throws KeeperE
       }
       byte[] data = ZKUtil.getData(zkw, znode);
       if (ProtobufUtil.isPBMagicPrefix(data)) continue;
-      ZooKeeperProtos.DeprecatedTableState.Builder builder =
-          ZooKeeperProtos.DeprecatedTableState.newBuilder();
-      builder.setState(ZooKeeperProtos.DeprecatedTableState.State.valueOf(Bytes.toString(data)));
+      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
+      builder.setState(ZooKeeperProtos.Table.State.valueOf(Bytes.toString(data)));
       data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
       ZKUtil.setData(zkw, znode, data);
     }
@@ -239,14 +232,15 @@ private void checkAndMigratePeerZnodesToPB(ZooKeeperWatcher zkw, String znode,
   }
 
   private void migrateClusterKeyToPB(ZooKeeperWatcher zkw, String peerZnode, byte[] data)
-      throws KeeperException {
+      throws KeeperException, NoNodeException {
     ReplicationPeer peer = ZooKeeperProtos.ReplicationPeer.newBuilder()
         .setClusterkey(Bytes.toString(data)).build();
     ZKUtil.setData(zkw, peerZnode, ProtobufUtil.prependPBMagic(peer.toByteArray()));
   }
 
   private void migratePeerStateToPB(ZooKeeperWatcher zkw, byte[] data,
-     String peerStatePath) throws KeeperException {
+ String peerStatePath)
+      throws KeeperException, NoNodeException {
     String state = Bytes.toString(data);
     if (ZooKeeperProtos.ReplicationState.State.ENABLED.name().equals(state)) {
       ZKUtil.setData(zkw, peerStatePath, ReplicationStateZKBase.ENABLED_ZNODE_BYTES);
@@ -255,80 +249,6 @@ private void migratePeerStateToPB(ZooKeeperWatcher zkw, byte[] data,
     }
   }
 
-  /**
-   * Method for table states migration.
-   * Reading state from zk, applying them to internal state
-   * and delete.
-   * Used by master to clean migration from zk based states to
-   * table descriptor based states.
-   */
-  @Deprecated
-  public static Map<TableName, TableState.State> queryForTableStates(ZooKeeperWatcher zkw)
-      throws KeeperException, InterruptedException {
-    Map<TableName, TableState.State> rv = new HashMap<>();
-    List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
-    if (children == null) {
-      return rv;
-    }
-    for (String child: children) {
-      TableName tableName = TableName.valueOf(child);
-      ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName);
-      TableState.State newState = TableState.State.ENABLED;
-      if (state != null) {
-        switch (state) {
-          case ENABLED:
-            newState = TableState.State.ENABLED;
-            break;
-          case DISABLED:
-            newState = TableState.State.DISABLED;
-            break;
-          case DISABLING:
-            newState = TableState.State.DISABLING;
-            break;
-          case ENABLING:
-            newState = TableState.State.ENABLING;
-            break;
-          default:
-        }
-      }
-      rv.put(tableName, newState);
-    }
-    return rv;
-  }
-
-  /**
-   * Gets table state from ZK.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return Null or {@link ZooKeeperProtos.DeprecatedTableState.State} found in znode.
-   * @throws KeeperException
-   */
-  @Deprecated
-  private static  ZooKeeperProtos.DeprecatedTableState.State getTableState(
-      final ZooKeeperWatcher zkw, final TableName tableName)
-      throws KeeperException, InterruptedException {
-    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
-    byte [] data = ZKUtil.getData(zkw, znode);
-    if (data == null || data.length <= 0) {
-      return null;
-    }
-    try {
-      ProtobufUtil.expectPBMagicPrefix(data);
-      ZooKeeperProtos.DeprecatedTableState.Builder builder =
-          ZooKeeperProtos.DeprecatedTableState.newBuilder();
-      int magicLen = ProtobufUtil.lengthOfPBMagic();
-      ZooKeeperProtos.DeprecatedTableState t = builder.mergeFrom(data,
-          magicLen, data.length - magicLen).build();
-      return t.getState();
-    } catch (InvalidProtocolBufferException e) {
-      KeeperException ke = new KeeperException.DataInconsistencyException();
-      ke.initCause(e);
-      throw ke;
-    } catch (DeserializationException e) {
-      throw ZKUtil.convert(e);
-    }
-  }
-
   public static void main(String args[]) throws Exception {
     System.exit(ToolRunner.run(HBaseConfiguration.create(), new ZKDataMigrator(), args));
   }
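The znode payload rewrite in checkAndMigrateTableStatesToPB above, isolated as a sketch. It assumes the legacy payload is one of the Table.State enum names written as a plain string, which is what pre-protobuf clusters stored; the class name is illustrative.

import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class TableStateEncodingSketch {
  // Convert a legacy plain-string state payload into the PB-magic-prefixed
  // ZooKeeperProtos.Table blob that the migrated znode stores.
  static byte[] toPbPayload(byte[] legacyData) {
    if (ProtobufUtil.isPBMagicPrefix(legacyData)) {
      return legacyData; // already migrated, leave untouched
    }
    ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
    builder.setState(ZooKeeperProtos.Table.State.valueOf(Bytes.toString(legacyData)));
    return ProtobufUtil.prependPBMagic(builder.build().toByteArray());
  }

  public static void main(String[] args) {
    byte[] pb = toPbPayload(Bytes.toBytes("DISABLED"));
    System.out.println("Encoded " + pb.length + " bytes, PB magic prefix included");
  }
}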
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index aa6a4c170eab..d6c181315543 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -18,10 +18,6 @@
  */
 package org.apache.hadoop.hbase.wal;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ServiceException;
-import com.google.protobuf.TextFormat;
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -53,6 +49,7 @@
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -64,6 +61,7 @@
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -73,6 +71,7 @@
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
@@ -83,7 +82,6 @@
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.master.SplitLogManager;
@@ -100,6 +98,7 @@
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.LastSequenceId;
@@ -122,6 +121,11 @@
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.io.MultipleIOException;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ServiceException;
+import com.google.protobuf.TextFormat;
+
 /**
  * This class is responsible for splitting up a bunch of regionserver commit log
  * files that are no longer being written to, into new files, one per region for
@@ -327,14 +331,13 @@ boolean splitLogFile(FileStatus logfile, CancelableProgressable reporter) throws
         LOG.warn("Nothing to split in log file " + logPath);
         return true;
       }
-      if(csm != null) {
-        HConnection scc = csm.getServer().getConnection();
-        TableName[] tables = scc.listTableNames();
-        for (TableName table : tables) {
-          if (scc.getTableState(table)
-              .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
-            disablingOrDisabledTables.add(table);
-          }
+      if (csm != null) {
+        try {
+          TableStateManager tsm = csm.getTableStateManager();
+          disablingOrDisabledTables = tsm.getTablesInStates(
+          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING);
+        } catch (CoordinatedStateException e) {
+          throw new IOException("Can't get disabling/disabled tables", e);
         }
       }
       int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
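A sketch of the coordination-layer lookup WALSplitter now performs, assuming a started coordinated state manager as splitLogFile receives it; the class name, wrapper method, and broad throws clause are illustrative.

import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableStateManager;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class DisabledTableLookupSketch {
  // Ask the coordination engine which tables are disabled or being disabled,
  // mirroring the block in splitLogFile() above.
  static Set<TableName> disablingOrDisabled(BaseCoordinatedStateManager csm) throws Exception {
    try {
      TableStateManager tsm = csm.getTableStateManager();
      return tsm.getTablesInStates(
          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING);
    } catch (CoordinatedStateException e) {
      throw new IOException("Can't get disabling/disabled tables", e);
    }
  }
}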
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
new file mode 100644
index 000000000000..db00c14ce23a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
@@ -0,0 +1,369 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableStateManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Implementation of TableStateManager which reads, caches and sets state
+ * up in ZooKeeper.  If multiple read/write clients, will make for confusion.
+ * Code running on client side without consensus context should use
+ * {@link ZKTableStateClientSideReader} instead.
+ *
+ *
+ * <p>To save on trips to the zookeeper ensemble, internally we cache table
+ * state.
+ */
+@InterfaceAudience.Private
+public class ZKTableStateManager implements TableStateManager {
+  // A znode will exist under the table directory if it is in any of the
+  // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING},
+  // or {@link TableState#DISABLED}. If {@link TableState#ENABLED}, there will
+  // be no entry for a table in zk. Thats how it currently works.
+
+  private static final Log LOG = LogFactory.getLog(ZKTableStateManager.class);
+  private final ZooKeeperWatcher watcher;
+
+  /**
+   * Cache of what we found in zookeeper so we don't have to go to zk ensemble
+   * for every query. Synchronize access rather than use concurrent Map because
+   * synchronization needs to span query of zk.
+   */
+  private final Map<TableName, ZooKeeperProtos.Table.State> cache =
+    new HashMap<TableName, ZooKeeperProtos.Table.State>();
+
+  public ZKTableStateManager(final ZooKeeperWatcher zkw) throws KeeperException,
+      InterruptedException {
+    super();
+    this.watcher = zkw;
+    populateTableStates();
+  }
+
+  /**
+   * Gets a list of all the tables set as disabled in zookeeper.
+   * @throws KeeperException, InterruptedException
+   */
+  private void populateTableStates() throws KeeperException, InterruptedException {
+    synchronized (this.cache) {
+      List<String> children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode);
+      if (children == null) return;
+      for (String child: children) {
+        TableName tableName = TableName.valueOf(child);
+        ZooKeeperProtos.Table.State state = getTableState(this.watcher, tableName);
+        if (state != null) this.cache.put(tableName, state);
+      }
+    }
+  }
+
+  /**
+   * Sets table state in ZK. Sets no watches.
+   *
+   * {@inheritDoc}
+   */
+  @Override
+  public void setTableState(TableName tableName, ZooKeeperProtos.Table.State state)
+      throws CoordinatedStateException {
+    synchronized (this.cache) {
+      LOG.info("Moving table " + tableName + " state from " + this.cache.get(tableName)
+        + " to " + state);
+      try {
+        setTableStateInZK(tableName, state);
+      } catch (KeeperException e) {
+        throw new CoordinatedStateException(e);
+      }
+    }
+  }
+
+  /**
+   * Checks and sets table state in ZK. Sets no watches.
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean setTableStateIfInStates(TableName tableName,
+      ZooKeeperProtos.Table.State newState,
+      ZooKeeperProtos.Table.State... states)
+      throws CoordinatedStateException {
+    synchronized (this.cache) {
+      // Transition ENABLED->DISABLING has to be performed with a hack, because
+      // we treat empty state as enabled in this case because 0.92- clusters.
+      if (
+          (newState == ZooKeeperProtos.Table.State.DISABLING) &&
+          this.cache.get(tableName) != null && !isTableState(tableName, states) ||
+          (newState != ZooKeeperProtos.Table.State.DISABLING &&
+          !isTableState(tableName, states) )) {
+        return false;
+      }
+      try {
+        setTableStateInZK(tableName, newState);
+      } catch (KeeperException e) {
+        throw new CoordinatedStateException(e);
+      }
+      return true;
+    }
+  }
+
+  /**
+   * Checks and sets table state in ZK. Sets no watches.
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean setTableStateIfNotInStates(TableName tableName,
+      ZooKeeperProtos.Table.State newState,
+      ZooKeeperProtos.Table.State... states)
+      throws CoordinatedStateException {
+    synchronized (this.cache) {
+      if (isTableState(tableName, states)) {
+        // If the table is in the one of the states from the states list, the cache
+        // might be out-of-date, try to find it out from the master source (zookeeper server).
+        //
+        // Note: this adds extra zookeeper server calls and might have performance impact.
+        // However, this is not the happy path so we should not reach here often. Therefore,
+        // the performance impact should be minimal to none.
+        try {
+          ZooKeeperProtos.Table.State curstate = getTableState(watcher, tableName);
+
+          if (isTableInState(Arrays.asList(states), curstate)) {
+            return false;
+          }
+        } catch (KeeperException e) {
+          throw new CoordinatedStateException(e);
+        } catch (InterruptedException e) {
+          throw new CoordinatedStateException(e);
+        }
+      }
+      try {
+        setTableStateInZK(tableName, newState);
+      } catch (KeeperException e) {
+        throw new CoordinatedStateException(e);
+      }
+      return true;
+    }
+  }
+
+  private void setTableStateInZK(final TableName tableName,
+      final ZooKeeperProtos.Table.State state)
+      throws KeeperException {
+    String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString());
+    if (ZKUtil.checkExists(this.watcher, znode) == -1) {
+      ZKUtil.createAndFailSilent(this.watcher, znode);
+    }
+    synchronized (this.cache) {
+      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
+      builder.setState(state);
+      byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
+      ZKUtil.setData(this.watcher, znode, data);
+      this.cache.put(tableName, state);
+    }
+  }
+
+  /**
+   * Checks if table is marked in specified state in ZK (using cache only). {@inheritDoc}
+   */
+  @Override
+  public boolean isTableState(final TableName tableName,
+      final ZooKeeperProtos.Table.State... states) {
+    return isTableState(tableName, false, states); // only check cache
+  }
+
+  /**
+   * Checks if table is marked in specified state in ZK. {@inheritDoc}
+   */
+  @Override
+  public boolean isTableState(final TableName tableName, final boolean checkSource,
+      final ZooKeeperProtos.Table.State... states) {
+    boolean isTableInSpecifiedState;
+    synchronized (this.cache) {
+      ZooKeeperProtos.Table.State currentState = this.cache.get(tableName);
+      if (checkSource) {
+        // The cache might be out-of-date, try to find it out from the master source (zookeeper
+        // server) and update the cache.
+        try {
+          ZooKeeperProtos.Table.State stateInZK = getTableState(watcher, tableName);
+
+          if (currentState != stateInZK) {
+            if (stateInZK != null) {
+              this.cache.put(tableName, stateInZK);
+            } else {
+              this.cache.remove(tableName);
+            }
+            currentState = stateInZK;
+          }
+        } catch (KeeperException | InterruptedException e) {
+          // Contacting zookeeper failed. Let us just trust the value in cache.
+        }
+      }
+      return isTableInState(Arrays.asList(states), currentState);
+    }
+  }
+
+  /**
+   * Deletes the table in zookeeper. Fails silently if the table is not currently disabled in
+   * zookeeper. Sets no watches. {@inheritDoc}
+   */
+  @Override
+  public void setDeletedTable(final TableName tableName)
+      throws CoordinatedStateException {
+    synchronized (this.cache) {
+      if (this.cache.remove(tableName) == null) {
+        LOG.warn("Moving table " + tableName + " state to deleted but was already deleted");
+      }
+      try {
+        ZKUtil.deleteNodeFailSilent(this.watcher,
+          ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()));
+      } catch (KeeperException e) {
+        throw new CoordinatedStateException(e);
+      }
+    }
+  }
+
+  /**
+   * check if table is present.
+   *
+   * @param tableName table we're working on
+   * @return true if the table is present
+   */
+  @Override
+  public boolean isTablePresent(final TableName tableName) {
+    synchronized (this.cache) {
+      ZooKeeperProtos.Table.State state = this.cache.get(tableName);
+      return !(state == null);
+    }
+  }
+
+  /**
+   * Gets a list of all the tables set as disabling in zookeeper.
+   * @return Set of disabling tables, empty Set if none
+   * @throws CoordinatedStateException if error happened in underlying coordination engine
+   */
+  @Override
+  public Set<TableName> getTablesInStates(ZooKeeperProtos.Table.State... states)
+      throws InterruptedIOException, CoordinatedStateException {
+    try {
+      return getAllTables(states);
+    } catch (KeeperException e) {
+      throw new CoordinatedStateException(e);
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states,
+      boolean deletePermanentState)
+      throws CoordinatedStateException {
+    synchronized (this.cache) {
+      if (isTableState(tableName, states)) {
+        this.cache.remove(tableName);
+        if (deletePermanentState) {
+          try {
+            ZKUtil.deleteNodeFailSilent(this.watcher,
+                ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()));
+          } catch (KeeperException e) {
+            throw new CoordinatedStateException(e);
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Gets a list of all the tables of specified states in zookeeper.
+   * @return Set of tables of specified states, empty Set if none
+   * @throws KeeperException
+   */
+  Set<TableName> getAllTables(final ZooKeeperProtos.Table.State... states)
+      throws KeeperException, InterruptedIOException {
+
+    Set<TableName> allTables = new HashSet<TableName>();
+    List<String> children =
+      ZKUtil.listChildrenNoWatch(watcher, watcher.tableZNode);
+    if(children == null) return allTables;
+    for (String child: children) {
+      TableName tableName = TableName.valueOf(child);
+      ZooKeeperProtos.Table.State state;
+      try {
+        state = getTableState(watcher, tableName);
+      } catch (InterruptedException e) {
+        throw new InterruptedIOException();
+      }
+      for (ZooKeeperProtos.Table.State expectedState: states) {
+        if (state == expectedState) {
+          allTables.add(tableName);
+          break;
+        }
+      }
+    }
+    return allTables;
+  }
+
+  /**
+   * Gets table state from ZK.
+   * @param zkw ZooKeeperWatcher instance to use
+   * @param tableName table we're checking
+   * @return Null or {@link ZooKeeperProtos.Table.State} found in znode.
+   * @throws KeeperException
+   */
+  private ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw,
+      final TableName tableName)
+      throws KeeperException, InterruptedException {
+    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
+    byte [] data = ZKUtil.getData(zkw, znode);
+    if (data == null || data.length <= 0) return null;
+    try {
+      ProtobufUtil.expectPBMagicPrefix(data);
+      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
+      int magicLen = ProtobufUtil.lengthOfPBMagic();
+      ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
+      return builder.getState();
+    } catch (IOException e) {
+      KeeperException ke = new KeeperException.DataInconsistencyException();
+      ke.initCause(e);
+      throw ke;
+    } catch (DeserializationException e) {
+      throw ZKUtil.convert(e);
+    }
+  }
+
+  /**
+   * @return true if current state isn't null and is contained
+   * in the list of expected states.
+   */
+  private boolean isTableInState(final List<ZooKeeperProtos.Table.State> expectedStates,
+      final ZooKeeperProtos.Table.State currentState) {
+    return currentState != null && expectedStates.contains(currentState);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index aa355a157db7..6eee5aa66325 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -3395,7 +3395,6 @@ public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis)
     }
   }
 
-
   /**
    * Make sure that at least the specified number of region servers
    * are running
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
index 5b7ba496b360..946b812ed234 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
@@ -32,7 +32,6 @@
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -55,7 +54,6 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
 
 
 /**
@@ -100,72 +98,70 @@ public void testAssignmentManagerDoesntUseDrainingServer() throws Exception {
     final HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("table_test"),
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
 
-    try (ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-      "zkWatcher-Test", abortable, true)) {
+    ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+      "zkWatcher-Test", abortable, true);
 
-      Map<ServerName, ServerLoad> onlineServers = new HashMap<ServerName, ServerLoad>();
+    Map<ServerName, ServerLoad> onlineServers = new HashMap<ServerName, ServerLoad>();
 
-      onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD);
-      onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD);
+    onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD);
+    onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD);
 
-      Mockito.when(server.getConfiguration()).thenReturn(conf);
-      Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
-      Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
-      Mockito.when(server.getRegionServerVersion(Mockito.any(ServerName.class))).thenReturn("0.0.0");
+    Mockito.when(server.getConfiguration()).thenReturn(conf);
+    Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
+    Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
+    Mockito.when(server.getRegionServerVersion(Mockito.any(ServerName.class))).thenReturn("0.0.0");
 
-      CoordinatedStateManager cp = new ZkCoordinatedStateManager();
-      cp.initialize(server);
-      cp.start();
+    CoordinatedStateManager cp = new ZkCoordinatedStateManager();
+    cp.initialize(server);
+    cp.start();
 
-      Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
+    Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
 
-      Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
-      Mockito.when(serverManager.getOnlineServersList())
-      .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
+    Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
+    Mockito.when(serverManager.getOnlineServersList())
+    .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
 
-      Mockito.when(serverManager.createDestinationServersList())
-      .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
-      Mockito.when(serverManager.createDestinationServersList(null))
-      .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
-      Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
-        new ArrayList<ServerName>(onlineServers.keySet()));
+    Mockito.when(serverManager.createDestinationServersList())
+    .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
+    Mockito.when(serverManager.createDestinationServersList(null))
+    .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
+    Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
+      new ArrayList<ServerName>(onlineServers.keySet()));
 
-      for (ServerName sn : onlineServers.keySet()) {
-        Mockito.when(serverManager.isServerOnline(sn)).thenReturn(true);
-        Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1)).thenReturn(true);
-        Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1, null, false)).thenReturn(true);
-        Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, new ArrayList<ServerName>()))
-        .thenReturn(RegionOpeningState.OPENED);
-        Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, null))
-        .thenReturn(RegionOpeningState.OPENED);
-        Mockito.when(serverManager.addServerToDrainList(sn)).thenReturn(true);
-      }
+    for (ServerName sn : onlineServers.keySet()) {
+      Mockito.when(serverManager.isServerOnline(sn)).thenReturn(true);
+      Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1)).thenReturn(true);
+      Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1, null, false)).thenReturn(true);
+      Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, new ArrayList<ServerName>()))
+      .thenReturn(RegionOpeningState.OPENED);
+      Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, null))
+      .thenReturn(RegionOpeningState.OPENED);
+      Mockito.when(serverManager.addServerToDrainList(sn)).thenReturn(true);
+    }
 
-      Mockito.when(master.getServerManager()).thenReturn(serverManager);
+    Mockito.when(master.getServerManager()).thenReturn(serverManager);
 
-      TableStateManager tsm = mock(TableStateManager.class);
-      am = new AssignmentManager(server, serverManager,
-        balancer, startupMasterExecutor("mockExecutorService"), null, null, tsm);
+    am = new AssignmentManager(server, serverManager,
+      balancer, startupMasterExecutor("mockExecutorService"), null, null);
 
-      Mockito.when(master.getAssignmentManager()).thenReturn(am);
-      Mockito.when(master.getZooKeeper()).thenReturn(zkWatcher);
+    Mockito.when(master.getAssignmentManager()).thenReturn(am);
+    Mockito.when(master.getZooKeeper()).thenReturn(zkWatcher);
 
-      am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_A));
+    am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_A));
 
-      zkWatcher.registerListenerFirst(am);
+    zkWatcher.registerListenerFirst(am);
 
-      addServerToDrainedList(SERVERNAME_A, onlineServers, serverManager);
+    addServerToDrainedList(SERVERNAME_A, onlineServers, serverManager);
 
-      am.assign(REGIONINFO, true);
+    am.assign(REGIONINFO, true);
 
-      setRegionOpenedOnZK(zkWatcher, SERVERNAME_A, REGIONINFO);
-      setRegionOpenedOnZK(zkWatcher, SERVERNAME_B, REGIONINFO);
+    setRegionOpenedOnZK(zkWatcher, SERVERNAME_A, REGIONINFO);
+    setRegionOpenedOnZK(zkWatcher, SERVERNAME_B, REGIONINFO);
 
-      am.waitForAssignment(REGIONINFO);
+    am.waitForAssignment(REGIONINFO);
 
-      assertTrue(am.getRegionStates().isRegionOnline(REGIONINFO));
-      assertNotEquals(am.getRegionStates().getRegionServerOfRegion(REGIONINFO), SERVERNAME_A);
-    }
+    assertTrue(am.getRegionStates().isRegionOnline(REGIONINFO));
+    assertNotEquals(am.getRegionStates().getRegionServerOfRegion(REGIONINFO), SERVERNAME_A);
   }
 
   @Test
@@ -211,82 +207,80 @@ public void testAssignmentManagerDoesntUseDrainedServerWithBulkAssign() throws E
     bulk.put(REGIONINFO_D, SERVERNAME_D);
     bulk.put(REGIONINFO_E, SERVERNAME_E);
 
-    try (ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-      "zkWatcher-BulkAssignTest", abortable, true)) {
-
-      Mockito.when(server.getConfiguration()).thenReturn(conf);
-      Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
-      Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
-
-      CoordinatedStateManager cp = new ZkCoordinatedStateManager();
-      cp.initialize(server);
-      cp.start();
-
-      Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
-
-      Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
-      Mockito.when(serverManager.getOnlineServersList()).thenReturn(
-        new ArrayList<ServerName>(onlineServers.keySet()));
-
-      Mockito.when(serverManager.createDestinationServersList()).thenReturn(
-        new ArrayList<ServerName>(onlineServers.keySet()));
-      Mockito.when(serverManager.createDestinationServersList(null)).thenReturn(
-        new ArrayList<ServerName>(onlineServers.keySet()));
-      Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
-        new ArrayList<ServerName>(onlineServers.keySet()));
-
-      for (Entry<HRegionInfo, ServerName> entry : bulk.entrySet()) {
-        Mockito.when(serverManager.isServerOnline(entry.getValue())).thenReturn(true);
-        Mockito.when(serverManager.sendRegionClose(entry.getValue(),
-          entry.getKey(), -1)).thenReturn(true);
-        Mockito.when(serverManager.sendRegionOpen(entry.getValue(),
-          entry.getKey(), -1, null)).thenReturn(RegionOpeningState.OPENED);
-        Mockito.when(serverManager.addServerToDrainList(entry.getValue())).thenReturn(true);
-      }
-
-      Mockito.when(master.getServerManager()).thenReturn(serverManager);
-
-      drainedServers.add(SERVERNAME_A);
-      drainedServers.add(SERVERNAME_B);
-      drainedServers.add(SERVERNAME_C);
-      drainedServers.add(SERVERNAME_D);
-
-      TableStateManager tsm = mock(TableStateManager.class);
-      am = new AssignmentManager(server, serverManager, balancer,
-        startupMasterExecutor("mockExecutorServiceBulk"), null, null, tsm);
-
-      Mockito.when(master.getAssignmentManager()).thenReturn(am);
-
-      zkWatcher.registerListener(am);
-
-      for (ServerName drained : drainedServers) {
-        addServerToDrainedList(drained, onlineServers, serverManager);
-      }
-
-      am.assign(bulk);
-
-      Set<RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
-      for (RegionState rs : regionsInTransition) {
-        setRegionOpenedOnZK(zkWatcher, rs.getServerName(), rs.getRegion());
-      }
-
-      am.waitForAssignment(REGIONINFO_A);
-      am.waitForAssignment(REGIONINFO_B);
-      am.waitForAssignment(REGIONINFO_C);
-      am.waitForAssignment(REGIONINFO_D);
-      am.waitForAssignment(REGIONINFO_E);
-
-      Map<HRegionInfo, ServerName> regionAssignments = am.getRegionStates().getRegionAssignments();
-      for (Entry<HRegionInfo, ServerName> entry : regionAssignments.entrySet()) {
-        LOG.info("Region Assignment: "
-          + entry.getKey().getRegionNameAsString() + " Server: " + entry.getValue());
-        bunchServersAssigned.add(entry.getValue());
-      }
-
-      for (ServerName sn : drainedServers) {
-        assertFalse(bunchServersAssigned.contains(sn));
-      }
-    }
+    ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+      "zkWatcher-BulkAssignTest", abortable, true);
+
+    Mockito.when(server.getConfiguration()).thenReturn(conf);
+    Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
+    Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
+
+    CoordinatedStateManager cp = new ZkCoordinatedStateManager();
+    cp.initialize(server);
+    cp.start();
+
+    Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
+
+    Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
+    Mockito.when(serverManager.getOnlineServersList()).thenReturn(
+      new ArrayList<ServerName>(onlineServers.keySet()));
+
+    Mockito.when(serverManager.createDestinationServersList()).thenReturn(
+      new ArrayList<ServerName>(onlineServers.keySet()));
+    Mockito.when(serverManager.createDestinationServersList(null)).thenReturn(
+      new ArrayList<ServerName>(onlineServers.keySet()));
+    Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
+      new ArrayList<ServerName>(onlineServers.keySet()));
+
+    for (Entry<HRegionInfo, ServerName> entry : bulk.entrySet()) {
+      Mockito.when(serverManager.isServerOnline(entry.getValue())).thenReturn(true);
+      Mockito.when(serverManager.sendRegionClose(entry.getValue(),
+        entry.getKey(), -1)).thenReturn(true);
+      Mockito.when(serverManager.sendRegionOpen(entry.getValue(),
+        entry.getKey(), -1, null)).thenReturn(RegionOpeningState.OPENED);
+      Mockito.when(serverManager.addServerToDrainList(entry.getValue())).thenReturn(true);
+    }
+
+    Mockito.when(master.getServerManager()).thenReturn(serverManager);
+
+    drainedServers.add(SERVERNAME_A);
+    drainedServers.add(SERVERNAME_B);
+    drainedServers.add(SERVERNAME_C);
+    drainedServers.add(SERVERNAME_D);
+
+    am = new AssignmentManager(server, serverManager,
+      balancer, startupMasterExecutor("mockExecutorServiceBulk"), null, null);
+
+    Mockito.when(master.getAssignmentManager()).thenReturn(am);
+
+    zkWatcher.registerListener(am);
+
+    for (ServerName drained : drainedServers) {
+      addServerToDrainedList(drained, onlineServers, serverManager);
+    }
+
+    am.assign(bulk);
+
+    Set<RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
+    for (RegionState rs : regionsInTransition) {
+      setRegionOpenedOnZK(zkWatcher, rs.getServerName(), rs.getRegion());
+    }
+
+    am.waitForAssignment(REGIONINFO_A);
+    am.waitForAssignment(REGIONINFO_B);
+    am.waitForAssignment(REGIONINFO_C);
+    am.waitForAssignment(REGIONINFO_D);
+    am.waitForAssignment(REGIONINFO_E);
+
+    Map<HRegionInfo, ServerName> regionAssignments = am.getRegionStates().getRegionAssignments();
+    for (Entry<HRegionInfo, ServerName> entry : regionAssignments.entrySet()) {
+      LOG.info("Region Assignment: "
+        + entry.getKey().getRegionNameAsString() + " Server: " + entry.getValue());
+      bunchServersAssigned.add(entry.getValue());
+    }
+
+    for (ServerName sn : drainedServers) {
+      assertFalse(bunchServersAssigned.contains(sn));
+    }
   }
 
   private void addServerToDrainedList(ServerName serverName,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index 9d5259a9af05..f963461b7c20 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -42,8 +42,8 @@ public void 
testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse() Path rootdir = new Path(UTIL.getDataTestDir(), name); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); - assertTrue("Should create new table descriptor", - fstd.createTableDescriptor(new TableDescriptor(htd), false)); + + assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false)); } @Test @@ -56,8 +56,7 @@ public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse() FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(name); fstd.add(htd); - assertFalse("Should not create new table descriptor", - fstd.createTableDescriptor(new TableDescriptor(htd), false)); + assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false)); } @Test @@ -68,10 +67,9 @@ public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor() Path rootdir = new Path(UTIL.getDataTestDir(), name); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); - TableDescriptor td = new TableDescriptor(htd); - fstd.createTableDescriptor(td, false); + fstd.createTableDescriptor(htd, false); assertTrue("Should create new table descriptor", - fstd.createTableDescriptor(td, true)); + fstd.createTableDescriptor(htd, true)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java index 8d0e4188552e..4660bbbeec0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java @@ -160,8 +160,8 @@ private void verifyHColumnDescriptor(int expected, final TableName tableName, // Verify descriptor from HDFS MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); - TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); - hcds = td.getHTableDescriptor().getColumnFamilies(); + htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); + hcds = htd.getColumnFamilies(); verifyHColumnDescriptor(expected, hcds, tableName, families); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java deleted file mode 100644 index 19c1136727f0..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertEquals; - -/** - * Test setting values in the descriptor - */ -@Category(SmallTests.class) -public class TestTableDescriptor { - final static Log LOG = LogFactory.getLog(TestTableDescriptor.class); - - @Test - public void testPb() throws DeserializationException, IOException { - HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC); - final int v = 123; - htd.setMaxFileSize(v); - htd.setDurability(Durability.ASYNC_WAL); - htd.setReadOnly(true); - htd.setRegionReplication(2); - TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); - byte[] bytes = td.toByteArray(); - TableDescriptor deserializedTd = TableDescriptor.parseFrom(bytes); - assertEquals(td, deserializedTd); - assertEquals(td.getHTableDescriptor(), deserializedTd.getHTableDescriptor()); - assertEquals(td.getTableState(), deserializedTd.getTableState()); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index 0a9984597352..5b2c06267bbe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; -import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -50,8 +49,11 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.exceptions.MergeRegionException; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -253,7 +255,7 @@ public void testDisableAndEnableTable() throws IOException { this.admin.disableTable(ht.getName()); assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), TableState.State.DISABLED)); + ht.getName(), ZooKeeperProtos.Table.State.DISABLED)); // Test that table is 
disabled get = new Get(row); @@ -280,7 +282,7 @@ public void testDisableAndEnableTable() throws IOException { this.admin.enableTable(table); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), TableState.State.ENABLED)); + ht.getName(), ZooKeeperProtos.Table.State.ENABLED)); // Test that table is enabled try { @@ -352,7 +354,7 @@ public void testCreateTable() throws IOException { assertEquals(numTables + 1, tables.length); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("testCreateTable"), TableState.State.ENABLED)); + TableName.valueOf("testCreateTable"), ZooKeeperProtos.Table.State.ENABLED)); } @Test (timeout=300000) @@ -1338,9 +1340,11 @@ public void testInvalidHColumnDescriptor() throws IOException { @Test (timeout=300000) public void testEnableDisableAddColumnDeleteColumn() throws Exception { + ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL); TableName tableName = TableName.valueOf("testEnableDisableAddColumnDeleteColumn"); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); - while (!this.admin.isTableEnabled(tableName)) { + while (!ZKTableStateClientSideReader.isEnabledTable(zkw, + TableName.valueOf("testEnableDisableAddColumnDeleteColumn"))) { Thread.sleep(10); } this.admin.disableTable(tableName); @@ -1483,4 +1487,16 @@ public void testMergeRegions() throws Exception { this.admin.deleteTable(tableName); } } + + @Test (timeout=30000) + public void testTableNotFoundException() throws Exception { + ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); + TableName table = TableName.valueOf("tableNotExists"); + try { + ZKTableStateClientSideReader.isDisabledTable(zkw, table); + fail("Shouldn't be here"); + } catch (TableNotFoundException e) { + // This is expected. 
+ } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java index 0695e4b0252e..0e5aa28cc5fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.exceptions.MasterRegistryFetchException; import org.apache.hadoop.hbase.master.HMaster; @@ -142,6 +143,8 @@ public boolean evaluate() throws Exception { }); assertEquals(registry.getClusterId(), activeMaster.getClusterId()); assertEquals(registry.getActiveMaster(), activeMaster.getServerName()); + assertTrue(registry.isTableOnlineState(TableName.META_TABLE_NAME, true)); + assertFalse(registry.isTableOnlineState(TableName.META_TABLE_NAME, false)); List metaLocations = Arrays.asList(registry.getMetaRegionLocations().getRegionLocations()); List actualMetaLocations = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 6258f6db9042..db26d37a2f09 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -70,11 +70,6 @@ public TableLockManager getTableLockManager() { return null; } - @Override - public TableStateManager getTableStateManager() { - return null; - } - @Override public MasterCoprocessorHost getMasterCoprocessorHost() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 92c045fc56c1..28f9e8315313 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -60,7 +60,6 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager; -import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -70,6 +69,7 @@ import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -156,9 +156,10 @@ public void testRestartMetaRegionServer() throws Exception { Bytes.toBytes(metaServerName.getServerName())); master.assignmentManager.waitUntilNoRegionsInTransition(60000); } - RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); - assertEquals("Meta 
should be not in transition", - metaState.getState(), RegionState.State.OPEN); + RegionState metaState = + MetaTableLocator.getMetaRegionState(master.getZooKeeper()); + assertEquals("Meta should not be in transition", + metaState.getState(), RegionState.State.OPEN); assertNotEquals("Meta should be moved off master", metaServerName, master.getServerName()); cluster.killRegionServer(metaServerName); @@ -288,8 +289,7 @@ public void testAssignRegionOnRestartedServer() throws Exception { String table = "testAssignRegionOnRestartedServer"; TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 20); TEST_UTIL.getMiniHBaseCluster().stopMaster(0); - //restart the master so that conf take into affect - TEST_UTIL.getMiniHBaseCluster().startMaster(); + TEST_UTIL.getMiniHBaseCluster().startMaster(); // restart the master so that the conf change takes effect ServerName deadServer = null; HMaster master = null; @@ -888,7 +888,7 @@ public void testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState() throws } } - am.getTableStateManager().setTableState(table, TableState.State.DISABLING); + am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLING); List toAssignRegions = am.cleanOutCrashedServerReferences(destServerName); assertTrue("Regions to be assigned should be empty.", toAssignRegions.isEmpty()); assertTrue("Regions to be assigned should be empty.", am.getRegionStates() @@ -897,7 +897,7 @@ public void testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState() throws if (hri != null && serverName != null) { am.regionOnline(hri, serverName); } - am.getTableStateManager().setTableState(table, TableState.State.DISABLED); + am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLED); TEST_UTIL.deleteTable(table); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 6b499f21cd6f..397d5a80f083 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -41,8 +41,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -56,13 +54,13 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination; import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails; -import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; @@ -354,18 +352,13 @@ public TableDescriptors getTableDescriptors() { return new 
TableDescriptors() { @Override public HTableDescriptor remove(TableName tablename) throws IOException { - // noop + // TODO Auto-generated method stub return null; } @Override public Map getAll() throws IOException { - // noop - return null; - } - - @Override public Map getAllDescriptors() throws IOException { - // noop + // TODO Auto-generated method stub return null; } @@ -375,12 +368,6 @@ public HTableDescriptor get(TableName tablename) return createHTableDescriptor(); } - @Override - public TableDescriptor getDescriptor(TableName tablename) - throws IOException { - return createTableDescriptor(); - } - @Override public Map getByNamespace(String name) throws IOException { return null; @@ -388,12 +375,8 @@ public Map getByNamespace(String name) throws IOExcept @Override public void add(HTableDescriptor htd) throws IOException { - // noop - } + // TODO Auto-generated method stub - @Override - public void add(TableDescriptor htd) throws IOException { - // noop } @Override public void setCacheOn() throws IOException { @@ -557,11 +540,6 @@ public TableNamespaceManager getTableNamespaceManager() { return null; } - @Override - public TableStateManager getTableStateManager() { - return null; - } - @Override public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b, boolean forcible, User user) throws IOException { @@ -1191,11 +1169,6 @@ private HTableDescriptor createHTableDescriptor() { return htd; } - private TableDescriptor createTableDescriptor() { - TableDescriptor htd = new TableDescriptor(createHTableDescriptor(), TableState.State.ENABLED); - return htd; - } - private MultiResponse buildMultiResponse(MultiRequest req) { MultiResponse.Builder builder = MultiResponse.newBuilder(); RegionActionResult.Builder regionActionResultBuilder = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 80e05e00d649..34715aad5a16 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.StringUtils; @@ -84,7 +84,7 @@ public void testMasterOpsWhileSplitting() throws Exception { try (HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) { assertTrue(m.assignmentManager.getTableStateManager().isTableState(TABLENAME, - TableState.State.ENABLED)); + ZooKeeperProtos.Table.State.ENABLED)); TEST_UTIL.loadTable(ht, FAMILYNAME, false); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index 99bc21edc41f..5586fdce34fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -45,19 +45,20 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.client.TableState; import 
org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.RegionTransition; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; @@ -72,8 +73,10 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKAssign; +import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.data.Stat; +import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -301,8 +304,8 @@ public void testMasterFailoverWithMockedRIT() throws Exception { log("Beginning to mock scenarios"); // Disable the disabledTable in ZK - TableStateManager tsm = master.getTableStateManager(); - tsm.setTableState(disabledTable, TableState.State.DISABLED); + TableStateManager zktable = new ZKTableStateManager(zkw); + zktable.setTableState(disabledTable, ZooKeeperProtos.Table.State.DISABLED); /* * ZK = OFFLINE @@ -618,7 +621,7 @@ public boolean isAborted() { assertTrue(" Table must be enabled.", master.getAssignmentManager() .getTableStateManager().isTableState(TableName.valueOf("enabledTable"), - TableState.State.ENABLED)); + ZooKeeperProtos.Table.State.ENABLED)); // we also need regions assigned out on the dead server List enabledAndOnDeadRegions = new ArrayList(); enabledAndOnDeadRegions.addAll(enabledRegions.subList(0, 6)); @@ -678,11 +681,13 @@ public boolean isAborted() { log("Beginning to mock scenarios"); // Disable the disabledTable in ZK - TableStateManager tsm = master.getTableStateManager(); - tsm.setTableState(disabledTable, TableState.State.DISABLED); + TableStateManager zktable = new ZKTableStateManager(zkw); + zktable.setTableState(disabledTable, ZooKeeperProtos.Table.State.DISABLED); assertTrue(" The enabled table should be identified on master fail over.", - tsm.isTableState(TableName.valueOf("enabledTable"), TableState.State.ENABLED)); + zktable.isTableState(TableName.valueOf("enabledTable"), + ZooKeeperProtos.Table.State.ENABLED)); + /* * ZK = CLOSING */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java index 5af7b470afd7..a2ecfb4ead63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RegionLocator; -import org.apache.hadoop.hbase.client.TableState; +import 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; @@ -102,8 +102,8 @@ public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() assertTrue("The table should not be in enabled state", cluster.getMaster() .getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("tableRestart"), TableState.State.DISABLED, - TableState.State.DISABLING)); + TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED, + ZooKeeperProtos.Table.State.DISABLING)); log("Enabling table\n"); // Need a new Admin, the previous one is on the old master Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); @@ -118,7 +118,7 @@ public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() 6, regions.size()); assertTrue("The table should be in enabled state", cluster.getMaster() .getAssignmentManager().getTableStateManager() - .isTableState(TableName.valueOf("tableRestart"), TableState.State.ENABLED)); + .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED)); ht.close(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java index c1affd56c832..9ecac42883db 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java @@ -42,9 +42,9 @@ import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.MockServer; import org.apache.hadoop.hbase.zookeeper.ZKAssign; +import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -140,10 +140,7 @@ public void testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches() // create a node with OPENED state zkw = HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL, region, server.getServerName()); - MasterServices masterServices = Mockito.mock(MasterServices.class); - when(masterServices.getTableDescriptors()).thenReturn(new FSTableDescriptors(conf)); - TableStateManager tsm = new TableStateManager(masterServices); - when(am.getTableStateManager()).thenReturn(tsm); + when(am.getTableStateManager()).thenReturn(new ZKTableStateManager(zkw)); Stat stat = new Stat(); String nodeName = ZKAssign.getNodeName(zkw, region.getRegionInfo() .getEncodedName()); @@ -174,8 +171,8 @@ public void testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches() } catch (Exception e) { expectedException = true; } - assertFalse("The process method should not throw any exception. 
" - , expectedException); + assertFalse("The process method should not throw any exception.", + expectedException); List znodes = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.assignmentZNode); String regionName = znodes.get(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java index a35e3594b39d..04102947f1f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java @@ -19,8 +19,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Test; import org.junit.experimental.categories.Category; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java index 7e5656bfa1fb..16a6450aecb8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java @@ -36,6 +36,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -52,6 +53,7 @@ import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.exceptions.LockTimeoutException; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -386,14 +388,12 @@ public void chore() { choreService.scheduleChore(alterThread); choreService.scheduleChore(splitThread); TEST_UTIL.waitTableEnabled(tableName); - while (true) { List regions = admin.getTableRegions(tableName); LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions)); assertEquals(admin.getTableDescriptor(tableName), desc); for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) { - HTableDescriptor regionTableDesc = region.getTableDesc(); - assertEquals(desc, regionTableDesc); + assertEquals(desc, region.getTableDesc()); } if (regions.size() >= 5) { break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index 86a54e5171d0..ff479d48e54a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.TableStateManager; import 
org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Durability; @@ -45,9 +45,9 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.ModifyRegionUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -188,13 +188,13 @@ public boolean processRow(Result rowResult) throws IOException { public static void validateTableIsEnabled(final HMaster master, final TableName tableName) throws IOException { TableStateManager tsm = master.getAssignmentManager().getTableStateManager(); - assertTrue(tsm.isTableState(tableName, TableState.State.ENABLED)); + assertTrue(tsm.isTableState(tableName, ZooKeeperProtos.Table.State.ENABLED)); } public static void validateTableIsDisabled(final HMaster master, final TableName tableName) throws IOException { TableStateManager tsm = master.getAssignmentManager().getTableStateManager(); - assertTrue(tsm.isTableState(tableName, TableState.State.DISABLED)); + assertTrue(tsm.isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java index c4ec0acfb937..f27150efc81e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java @@ -23,10 +23,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -45,8 +45,6 @@ public void tearDown() throws Exception { TEST_UTIL.shutdownMiniZKCluster(); } - /* - Note: Relevant fix was undone by HBASE-7767. @Test public void testMasterRestartAfterNameSpaceEnablingNodeIsCreated() throws Exception { // Step 1: start mini zk cluster. 
@@ -56,9 +54,8 @@ public void testMasterRestartAfterNameSpaceEnablingNodeIsCreated() throws Except TableName tableName = TableName.valueOf("hbase:namespace"); ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); - HBaseProtos.TableState.Builder builder = HBaseProtos.TableState.newBuilder(); - builder.setState(HBaseProtos.TableState.State.ENABLED); - builder.setTable(ProtobufUtil.toProtoTableName(tableName)); + ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); + builder.setState(ZooKeeperProtos.Table.State.ENABLED); byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); ZKUtil.createSetData(zkw, znode, data); LOG.info("Create an orphaned Znode " + znode); @@ -68,5 +65,4 @@ public void testMasterRestartAfterNameSpaceEnablingNodeIsCreated() throws Except TEST_UTIL.startMiniCluster(); assertTrue(TEST_UTIL.getHBaseCluster().getLiveMasterThreads().size() == 1); } - */ } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java index d849f020db59..0b5e83fafd47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.InvalidFamilyOperationException; -import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -274,9 +273,8 @@ private void verifyTableDescriptor(final TableName tableName, // Verify descriptor from HDFS MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); - TableDescriptor td = - FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); - verifyTableDescriptor(td.getHTableDescriptor(), tableName, families); + htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); + verifyTableDescriptor(htd, tableName, families); } private void verifyTableDescriptor(final HTableDescriptor htd, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java index aafebab201c4..75dc31f081f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -502,8 +501,8 @@ public SnapshotBuilder(final Configuration conf, final FileSystem fs, this.desc = desc; this.tableRegions = tableRegions; this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, 
rootDir, conf); - new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(snapshotDir, - new TableDescriptor(htd), false); + new FSTableDescriptors(conf, snapshotDir.getFileSystem(conf), rootDir) + .createTableDescriptorForTableDirectory(snapshotDir, htd, false); } public HTableDescriptor getTableDescriptor() { @@ -720,8 +719,7 @@ public HTableDescriptor createHtd(final String tableName) { private RegionData[] createTable(final HTableDescriptor htd, final int nregions) throws IOException { Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName()); - new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, - new TableDescriptor(htd), false); + new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false); assertTrue(nregions % 2 == 0); RegionData[] regions = new RegionData[nregions]; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index 3a22e400d20c..df01d710699f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -35,16 +35,14 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableExistsException; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -73,7 +71,6 @@ public void testRegexAgainstOldStyleTableInfo() { public void testCreateAndUpdate() throws IOException { Path testdir = UTIL.getDataTestDir("testCreateAndUpdate"); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate")); - TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir); assertTrue(fstd.createTableDescriptor(htd)); @@ -81,7 +78,7 @@ public void testCreateAndUpdate() throws IOException { FileStatus[] statuses = fs.listStatus(testdir); assertTrue("statuses.length=" + statuses.length, statuses.length == 1); for (int i = 0; i < 10; i++) { - fstd.updateTableDescriptor(td); + fstd.updateTableDescriptor(htd); } statuses = fs.listStatus(testdir); assertTrue(statuses.length == 1); @@ -95,29 +92,20 @@ public void testSequenceIdAdvancesOnTableInfo() throws IOException { Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo"); HTableDescriptor htd = new HTableDescriptor( TableName.valueOf("testSequenceidAdvancesOnTableInfo")); - TableDescriptor td = new TableDescriptor(htd); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir); - Path p0 = fstd.updateTableDescriptor(td); + Path p0 = fstd.updateTableDescriptor(htd); int i0 = FSTableDescriptors.getTableInfoSequenceId(p0); - Path p1 = 
fstd.updateTableDescriptor(td); + Path p1 = fstd.updateTableDescriptor(htd); // Assert we cleaned up the old file. assertTrue(!fs.exists(p0)); int i1 = FSTableDescriptors.getTableInfoSequenceId(p1); assertTrue(i1 == i0 + 1); - Path p2 = fstd.updateTableDescriptor(td); + Path p2 = fstd.updateTableDescriptor(htd); // Assert we cleaned up the old file. assertTrue(!fs.exists(p1)); int i2 = FSTableDescriptors.getTableInfoSequenceId(p2); assertTrue(i2 == i1 + 1); - td = new TableDescriptor(htd, TableState.State.DISABLED); - Path p3 = fstd.updateTableDescriptor(td); - // Assert we cleaned up the old file. - assertTrue(!fs.exists(p2)); - int i3 = FSTableDescriptors.getTableInfoSequenceId(p3); - assertTrue(i3 == i2 + 1); - TableDescriptor descriptor = fstd.getDescriptor(htd.getTableName()); - assertEquals(descriptor, td); } @Test @@ -170,13 +158,12 @@ public void testReadingHTDFromFS() final String name = "testReadingHTDFromFS"; FileSystem fs = FileSystem.get(UTIL.getConfiguration()); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); - TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); Path rootdir = UTIL.getDataTestDir(name); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); fstd.createTableDescriptor(htd); - TableDescriptor td2 = + HTableDescriptor htd2 = FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName()); - assertTrue(td.equals(td2)); + assertTrue(htd.equals(htd2)); } @Test @@ -190,8 +177,7 @@ public void testHTableDescriptors() final int count = 10; // Write out table infos. for (int i = 0; i < count; i++) { - TableDescriptor htd = new TableDescriptor(new HTableDescriptor(name + i), - TableState.State.ENABLED); + HTableDescriptor htd = new HTableDescriptor(name + i); htds.createTableDescriptor(htd); } @@ -205,7 +191,7 @@ public void testHTableDescriptors() for (int i = 0; i < count; i++) { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i)); htd.addFamily(new HColumnDescriptor("" + i)); - htds.updateTableDescriptor(new TableDescriptor(htd)); + htds.updateTableDescriptor(htd); } // Wait a while so mod time we write is for sure different. Thread.sleep(100); @@ -246,7 +232,7 @@ public void testHTableDescriptorsNoCache() for (int i = 0; i < count; i++) { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i)); htd.addFamily(new HColumnDescriptor("" + i)); - htds.updateTableDescriptor(new TableDescriptor(htd)); + htds.updateTableDescriptor(htd); } // Wait a while so mod time we write is for sure different. 
Thread.sleep(100); @@ -392,19 +378,18 @@ public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready"); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf( "testCreateTableDescriptorUpdatesIfThereExistsAlready")); - TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir); assertTrue(fstd.createTableDescriptor(htd)); assertFalse(fstd.createTableDescriptor(htd)); htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue")); - assertTrue(fstd.createTableDescriptor(td)); //this will re-create + assertTrue(fstd.createTableDescriptor(htd)); //this will re-create Path tableDir = fstd.getTableDir(htd.getTableName()); Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR); FileStatus[] statuses = fs.listStatus(tmpTableDir); assertTrue(statuses.length == 0); - assertEquals(td, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir)); + assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir)); } private static class FSTableDescriptorsTest diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index 28d355693e09..d7cbf0d57016 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.client.Admin; @@ -105,6 +106,7 @@ import org.apache.hadoop.hbase.master.TableLockManager.TableLock; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -2959,6 +2961,55 @@ public void run() { tableLockManager.tableDeleted(tableName); } + /** + * Test orphaned table ZNode (for table states) + */ + @Test + public void testOrphanedTableZNode() throws Exception { + TableName table = TableName.valueOf("testOrphanedZKTableEntry"); + + try { + TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getTableStateManager() + .setTableState(table, ZooKeeperProtos.Table.State.ENABLING); + + try { + setupTable(table); + Assert.fail( + "Create table should fail when its ZNode already exists in the ENABLING state."); + } catch (TableExistsException t) { + // Expected exception + } + // The table setup was interrupted in some state that needs cleanup. + try { + cleanupTable(table); + } catch (IOException e) { + // Because the table creation failed, the cleanup is expected to + // throw an exception. Ignore it and continue. 
+ } + + HBaseFsck hbck = doFsck(conf, false); + assertTrue(hbck.getErrors().getErrorList().contains(ERROR_CODE.ORPHANED_ZK_TABLE_ENTRY)); + + // fix the orphaned ZK entry + hbck = doFsck(conf, true); + + // check that the orphaned ZK table entry is gone. + hbck = doFsck(conf, false); + assertFalse(hbck.getErrors().getErrorList().contains(ERROR_CODE.ORPHANED_ZK_TABLE_ENTRY)); + // Now creating the table should succeed. + setupTable(table); + } finally { + // This code is reached whether the table was created successfully or the table setup + // failed in some unknown state. Therefore, the cleanup can either succeed or fail. + try { + cleanupTable(table); + } catch (IOException e) { + // The cleanup can throw an exception if the table creation failed partway. + // Ignore this exception. + } + } + } + @Test (timeout=180000) public void testMetaOffline() throws Exception { // check no errors diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java new file mode 100644 index 000000000000..e81c89f03407 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java @@ -0,0 +1,114 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.CoordinatedStateException; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableStateManager; +import org.apache.zookeeper.KeeperException; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table; + +@Category(MediumTests.class) +public class TestZKTableStateManager { + private static final Log LOG = LogFactory.getLog(TestZKTableStateManager.class); + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniZKCluster(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniZKCluster(); + } + + @Test + public void testTableStates() + throws CoordinatedStateException, IOException, KeeperException, InterruptedException { + final TableName name = + TableName.valueOf("testDisabled"); + Abortable abortable = new Abortable() { + @Override + public void abort(String why, Throwable e) { + LOG.info(why, e); + } + + @Override + public boolean isAborted() { + return false; + } + + }; + ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + name.getNameAsString(), abortable, true); + TableStateManager zkt = new ZKTableStateManager(zkw); + assertFalse(zkt.isTableState(name, Table.State.ENABLED)); + assertFalse(zkt.isTableState(name, Table.State.DISABLING)); + assertFalse(zkt.isTableState(name, Table.State.DISABLED)); + assertFalse(zkt.isTableState(name, Table.State.ENABLING)); + assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING)); + assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING)); + assertFalse(zkt.isTablePresent(name)); + zkt.setTableState(name, Table.State.DISABLING); + assertTrue(zkt.isTableState(name, Table.State.DISABLING)); + assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING)); + assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name)); + assertTrue(zkt.isTablePresent(name)); + zkt.setTableState(name, Table.State.DISABLED); + assertTrue(zkt.isTableState(name, Table.State.DISABLED)); + assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING)); + assertFalse(zkt.isTableState(name, Table.State.DISABLING)); + assertTrue(zkt.getTablesInStates(Table.State.DISABLED).contains(name)); + assertTrue(zkt.isTablePresent(name)); + zkt.setTableState(name, Table.State.ENABLING); + assertTrue(zkt.isTableState(name, Table.State.ENABLING)); + assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING)); + assertFalse(zkt.isTableState(name, Table.State.DISABLED)); + assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name)); + assertTrue(zkt.isTablePresent(name)); + zkt.setTableState(name, Table.State.ENABLED); + assertTrue(zkt.isTableState(name, Table.State.ENABLED)); + assertFalse(zkt.isTableState(name, Table.State.ENABLING)); + 
assertTrue(zkt.isTablePresent(name)); + zkt.setDeletedTable(name); + assertFalse(zkt.isTableState(name, Table.State.ENABLED)); + assertFalse(zkt.isTableState(name, Table.State.DISABLING)); + assertFalse(zkt.isTableState(name, Table.State.DISABLED)); + assertFalse(zkt.isTableState(name, Table.State.ENABLING)); + assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING)); + assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING)); + assertFalse(zkt.isTablePresent(name)); + } +}
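
For reviewers: TestZKTableStateManager above drives the restored ZK-side state machine from within the master process, while the client-side half of this revert is ZKTableStateClientSideReader. The standalone sketch below shows how that reader can be exercised against a running cluster. It is illustrative only and not part of the patch; it assumes an HBase 1.x client classpath, a ZooKeeper quorum reachable through the default client Configuration, and a hypothetical table named "someTable".

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

    public class TableStateCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // A quiet Abortable, in the same spirit as the ones the tests above construct.
        Abortable abortable = new Abortable() {
          @Override
          public void abort(String why, Throwable e) {
          }

          @Override
          public boolean isAborted() {
            return false;
          }
        };
        ZooKeeperWatcher zkw =
            new ZooKeeperWatcher(conf, "tableStateCheck", abortable, true);
        try {
          TableName table = TableName.valueOf("someTable");
          // Reads the ZooKeeperProtos.Table znode under zkw.tableZNode and reports
          // whether it records the ENABLED state; a missing znode is expected to
          // surface as a TableNotFoundException (testTableNotFoundException above
          // demonstrates this for the isDisabledTable variant).
          System.out.println("enabled: "
              + ZKTableStateClientSideReader.isEnabledTable(zkw, table));
        } finally {
          zkw.close();
        }
      }
    }

After this revert, registry implementations surface the same ZooKeeper-derived state to clients through ConnectionRegistry.isTableOnlineState, as the TestMasterRegistry change above exercises for hbase:meta.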