diff --git a/dropwizard-extra-curator/pom.xml b/dropwizard-extra-curator/pom.xml
index b43c124..dbadc72 100644
--- a/dropwizard-extra-curator/pom.xml
+++ b/dropwizard-extra-curator/pom.xml
@@ -5,7 +5,7 @@
 * The resulting {@link CuratorFramework} will have its lifecycle managed by the {@link Environment}
 * and will have {@link com.codahale.metrics.health.HealthCheck}s installed for the underlying ZooKeeper
 * ensemble.
@@ -118,7 +118,7 @@ public void setMaxRetries(final int maxRetries) {
 /**
  * Returns the initial time to wait before retrying a failed connection.
- * <p/>
+ * <p>
  * Subsequent retries will wait an exponential amount of time more than this.
  *
  * @return the initial time to wait before trying to connect again.
@@ -130,7 +130,7 @@ public Duration getBackOffBaseTime() {
 /**
  * Sets the initial time to wait before retrying a failed connection.
- * <p/>
+ * <p>
  * Subsequent retries will wait an exponential amount of time more than this.
  *
  * @param backOffBaseTime the initial time to wait before trying to connect again.
@@ -142,7 +142,7 @@ public void setBackOffBaseTime(final Duration backOffBaseTime) {
 /**
  * Returns a {@link RetryPolicy} for handling failed connection attempts.
- * <p/>
+ * <p>
  * Always configures an {@link ExponentialBackoffRetry} based on the {@link #getMaxRetries()
  * maximum retries} and {@link #getBackOffBaseTime() initial back-off} configured.
 *
diff --git a/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ensemble/DropwizardConfiguredZooKeeperFactory.java b/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ensemble/DropwizardConfiguredZooKeeperFactory.java
index 1ed5879..75bc28f 100644
--- a/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ensemble/DropwizardConfiguredZooKeeperFactory.java
+++ b/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ensemble/DropwizardConfiguredZooKeeperFactory.java
@@ -12,7 +12,7 @@
 /**
  * Provides integration for Dropwizard's ZooKeeper functionality with Curator.
- * <p/>
+ * <p>
* This ensures that {@link ZooKeeper} instances created by Curator integrate properly with the
* Dropwizard application life-cycle.
*/
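For reference, the retry policy documented in CuratorFactory above maps onto Curator's retry API roughly like this (a sketch, assuming the org.apache.curator package layout; only the getter names come from the Javadoc in this patch):

    import org.apache.curator.RetryPolicy;
    import org.apache.curator.retry.ExponentialBackoffRetry;

    // Sketch: build the policy from the two configured values.
    // ExponentialBackoffRetry(baseSleepTimeMs, maxRetries) waits the base time after
    // the first failure and exponentially longer on each subsequent retry.
    static RetryPolicy retryPolicy(final int maxRetries, final long backOffBaseTimeMs) {
        return new ExponentialBackoffRetry((int) backOffBaseTimeMs, maxRetries);
    }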
diff --git a/dropwizard-extra-hbase/pom.xml b/dropwizard-extra-hbase/pom.xml
index bd92ee4..c39a96d 100644
--- a/dropwizard-extra-hbase/pom.xml
+++ b/dropwizard-extra-hbase/pom.xml
@@ -5,7 +5,7 @@
 * This client places an upper bound on the number of concurrent asynchronous requests awaiting
 * completion. When this limit is reached, subsequent requests will block until an existing request
 * completes.
- * <p/>
+ * <p>
 * This behaviour is particularly useful for throttling high-throughput applications where HBase is
 * the bottle-neck. Without backing-off, such an application may run out of memory. By constraining
 * the maximum number of requests to a sufficiently high limit, but low enough so that it can be
 * reached without running out of memory, such applications can organically throttle and back-off
 * their requests.
- * <p/>
+ * <p>
 * Book-keeping of in-flight requests is done using a {@link Semaphore} which is configured as
 * "non-fair" to reduce its impact on request throughput.
 */
@@ -55,7 +55,7 @@ public BoundedHBaseClient(final HBaseClient client, final int maxRequests) {
 /**
  * Create a new instance with the given semaphore for the given underlying {@link HBaseClient}
  * implementation.
- * <p/>
+ * <p>
  * Note: this is only really useful for sharing a {@link Semaphore} between two {@link
  * BoundedHBaseClient} instances, which only really makes sense for instances configured for
  * the same cluster, but with different client-side settings. Use with caution!!
diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClient.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClient.java
index 21ac6d5..ae3a645 100644
--- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClient.java
+++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClient.java
@@ -11,9 +11,9 @@
 /**
  * Client for interacting with an HBase cluster.
- * <p/>
+ * <p>
  * To create an instance, use {@link HBaseClientFactory}.
- * <p/>
+ * <p>
  * All implementations are wrapper proxies around {@link org.hbase.async.HBaseClient} providing
  * additional functionality.
 *
@@ -172,6 +172,7 @@
  * Ensures that a specific table exists.
  *
  * @param table the table to check.
+ * @param family the family to check.
  *
  * @return a {@link Deferred} indicating the completion of the assertion.
  *
@@ -186,6 +187,7 @@
  * Ensures that a specific table exists.
  *
  * @param table the table to check.
+ * @param family the family to check.
  *
  * @return a {@link Deferred} indicating the completion of the assertion.
 *
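The bounding described for BoundedHBaseClient above boils down to a counted Semaphore around each in-flight Deferred (a sketch, not the module's exact code; the addBoth wiring follows the com.stumbleupon.async API):

    import com.stumbleupon.async.Callback;
    import com.stumbleupon.async.Deferred;
    import java.util.concurrent.Callable;
    import java.util.concurrent.Semaphore;

    // Sketch: take a permit before issuing a request and give it back when the
    // request completes, successfully or not. "Non-fair" keeps throughput high.
    class BoundedRequests {

        private final Semaphore semaphore;

        BoundedRequests(final int maxRequests) {
            this.semaphore = new Semaphore(maxRequests, /* fair = */ false);
        }

        <T> Deferred<T> bounded(final Callable<Deferred<T>> request) throws Exception {
            semaphore.acquire(); // blocks once maxRequests requests are in flight
            return request.call().addBoth(new Callback<T, T>() {
                public T call(final T result) {
                    semaphore.release(); // completed (or failed): free a slot
                    return result;
                }
            });
        }
    }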
diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClientFactory.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClientFactory.java
index 228bda5..6546e70 100644
--- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClientFactory.java
+++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClientFactory.java
@@ -13,10 +13,9 @@
 /**
  * A factory for creating and managing {@link HBaseClient} instances.
- * <p/>
+ * <p>
  * The resulting {@link HBaseClient} will have its lifecycle managed by an {@link Environment} and
- * will have {@link com.codahale.metrics.health.HealthCheck}s installed for the {@code .META.} and
- * {@code -ROOT-} tables.
+ * will have {@link com.codahale.metrics.health.HealthCheck}s installed for the {@code hbase:meta} table.
 *
 * @see HBaseClient
 */
@@ -90,7 +89,7 @@ public void setFlushInterval(final Duration flushInterval) {
 /**
  * Returns the maximum size of the buffer for increment operations.
- * <p/>
+ * <p>
  * Once this buffer is full, a flush is forced irrespective of the {@link #getFlushInterval()
  * flushInterval}.
 *
@@ -105,7 +104,7 @@ public Size getIncrementBufferSize() {
 /**
  * Sets the maximum size of the buffer for increment operations.
- * <p/>
+ * <p>
  * Once this buffer is full, a flush is forced irrespective of the {@link #getFlushInterval()
  * flushInterval}.
 *
@@ -120,10 +119,10 @@ public void setIncrementBufferSize(final Size incrementBufferSize) {
 /**
  * Returns the maximum number of concurrent asynchronous requests for the client.
- * <p/>
+ * <p>
  * Useful for throttling high-throughput applications when HBase is the bottle-neck to prevent
  * the client running out of memory.
- * <p/>
+ * <p>
  * When this is zero ("0"), no limit will be placed on the number of concurrent asynchronous
 * requests.
 *
@@ -138,10 +137,10 @@ public int getMaxConcurrentRequests() {
 /**
  * Sets the maximum number of concurrent asynchronous requests for the client.
- * <p/>
+ * <p>
  * Useful for throttling high-throughput applications when HBase is the bottle-neck to prevent
  * the client running out of memory.
- * <p/>
+ * <p>
  * When this is zero ("0"), no limit will be placed on the number of concurrent asynchronous
 * requests.
 *
@@ -228,9 +227,8 @@ public HBaseClient build(final Environment environment, final String name) {
         client.setFlushInterval(getFlushInterval());
         client.setIncrementBufferSize(getIncrementBufferSize());

-        // add healthchecks for META and ROOT tables
-        environment.healthChecks().register(name + "-meta", new HBaseHealthCheck(client, ".META."));
-        environment.healthChecks().register(name + "-root", new HBaseHealthCheck(client, "-ROOT-"));
+        // add healthchecks for hbase:meta table
+        environment.healthChecks().register(name + "-meta", new HBaseHealthCheck(client, "hbase:meta"));

         // manage client
         environment.lifecycle().manage(new ManagedHBaseClient(
@@ -241,11 +239,11 @@ public HBaseClient build(final Environment environment, final String name) {
 /**
  * Builds a new {@link HBaseClient} according to the given {@link HBaseClientFactory}.
- * <p/>
+ * <p>
 * If instrumentation {@link #instrumented is enabled} in the
 * configuration, this will build an {@link InstrumentedHBaseClient} wrapping the given {@link
 * HBaseClient}.
- * <p/>
+ * <p>
 * If instrumentation is not enabled, the given {@link HBaseClient} will be returned verbatim.
 *
 * @param client an underlying {@link HBaseClient} implementation.
@@ -263,10 +261,10 @@ private HBaseClient instrument(final HBaseClient client,
 /**
  * Builds a new {@link HBaseClient} according to the given {@link HBaseClientFactory}.
- * <p/>
+ * <p>
  * If the {@link #maxConcurrentRequests} is non-zero in the
  * configuration, this will build a {@link BoundedHBaseClient} that wraps the given client.
- * <p/>
+ * <p>
  * If {@link #maxConcurrentRequests} is zero, the given {@link
  * HBaseClient} will be returned verbatim.
 *
diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClient.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClient.java
index 28cf888..0159e30 100644
--- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClient.java
+++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClient.java
@@ -16,9 +16,9 @@
 /**
  * An {@link HBaseClient} that is instrumented with {@link Metric}s.
- * <p/>
+ * <p>
 * For each asynchronous request method, a {@link Timer} tracks the time taken for the request.
- * <p/>
+ * <p>
 * This implementation proxies all requests through an underlying {@link HBaseClient}; it merely
 * layers instrumentation on top of the underlying {@link HBaseClient}.
 *
@@ -38,9 +38,9 @@ public class InstrumentedHBaseClient implements HBaseClient {
 /**
  * Creates a new {@link InstrumentedHBaseClient} for the given underlying client.
- * <p/>
+ * <p>
  * Instrumentation will be registered with the given {@link MetricRegistry}.
- * <p/>
+ * <p>
  * A new {@link HBaseInstrumentation} container will be created for this {@link HBaseClient}
  * with the given {@link MetricRegistry}.
 *
diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/ManagedHBaseClient.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/ManagedHBaseClient.java
index 672c632..e1583e2 100644
--- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/ManagedHBaseClient.java
+++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/ManagedHBaseClient.java
@@ -29,11 +29,11 @@ public ManagedHBaseClient(final HBaseClient client, final Duration connectionTimeout) {
  * To force the connection, we look for the presence of the .META. table.
  *
  * @throws com.stumbleupon.async.TimeoutException if there is a problem connecting to HBase.
- * @throws org.hbase.async.TableNotFoundException if the .META. table can't be found.
- * @throws Exception if there is a problem verifying the .META. table exists.
+ * @throws org.hbase.async.TableNotFoundException if the hbase:meta table can't be found.
+ * @throws Exception if there is a problem verifying the hbase:meta table exists.
 */
 public void start() throws Exception {
-    client.ensureTableExists(".META.").joinUninterruptibly(connectionTimeout.toMilliseconds());
+    client.ensureTableExists("hbase:meta").joinUninterruptibly(connectionTimeout.toMilliseconds());
 }

 /**
diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/metrics/HBaseInstrumentation.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/metrics/HBaseInstrumentation.java
index 2249380..3856293 100644
--- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/metrics/HBaseInstrumentation.java
+++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/metrics/HBaseInstrumentation.java
@@ -30,7 +30,7 @@ public class HBaseInstrumentation {
 /**
  * Initialises instrumentation for the given {@link HBaseClient} using the given {@link
- * MetricsRegistry}.
+ * com.codahale.metrics.MetricRegistry}.
 *
 * @param client the client to create metrics for.
 * @param registry the registry to register the metrics with.
diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/BoundedRowScanner.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/BoundedRowScanner.java
index f200266..1347330 100644
--- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/BoundedRowScanner.java
+++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/BoundedRowScanner.java
@@ -11,7 +11,7 @@
 /**
  * A Scanner that constrains concurrent requests with a {@link Semaphore}.
- * <p/>
+ * <p>
 * To obtain an instance of a {@link RowScanner}, call {@link BoundedHBaseClient#scan(byte[])}.
 */
public class BoundedRowScanner implements RowScanner {
diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/InstrumentedRowScanner.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/InstrumentedRowScanner.java
index 13341e9..760bc58 100644
--- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/InstrumentedRowScanner.java
+++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/InstrumentedRowScanner.java
@@ -14,7 +14,7 @@
 /**
  * A {@link RowScanner} that is instrumented with {@link Metric}s.
- * <p/>
+ * <p>
 * To obtain an instance of a {@link RowScanner}, call {@link InstrumentedHBaseClient#scan(byte[])}.
 */
public class InstrumentedRowScanner implements RowScanner {
diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScanner.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScanner.java
index 585e030..7752e96 100644
--- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScanner.java
+++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScanner.java
@@ -8,10 +8,10 @@
 /**
  * Client for scanning over a selection of rows.
- * <p/>
+ * <p>
 * To obtain an instance of a {@link RowScanner}, call {@link
 * com.datasift.dropwizard.hbase.HBaseClient#scan(byte[])}.
- * <p/>
+ * <p>
 * All implementations are wrapper proxies around {@link org.hbase.async.Scanner} providing
 * additional functionality.
 */
diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScannerProxy.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScannerProxy.java
index 258cd62..2668e1d 100644
--- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScannerProxy.java
+++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScannerProxy.java
@@ -9,10 +9,10 @@
 /**
  * Client for scanning over a selection of rows.
- * <p/>
+ * <p>
 * To obtain an instance of a {@link RowScanner}, call {@link
 * com.datasift.dropwizard.hbase.HBaseClient#scan(byte[])}.
- * <p/>
+ * <p>
* This implementation is a proxy for a {@link org.hbase.async.Scanner}.
*/
public class RowScannerProxy implements RowScanner {
diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/util/TimerStoppingCallback.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/util/TimerStoppingCallback.java
index 8714957..7d4aff8 100644
--- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/util/TimerStoppingCallback.java
+++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/util/TimerStoppingCallback.java
@@ -4,19 +4,19 @@
import com.codahale.metrics.Timer;
/**
- * A {@link Callback} for stopping a {@link TimerContext} on completion.
+ * A {@link com.stumbleupon.async.Callback} for stopping a {@link com.codahale.metrics.Timer.Context} on completion.
*/
public class TimerStoppingCallback<T> implements Callback<T, T> {
* Each method is tested first, that it proxies its implementation to the underlying {@link
* HBaseClient}, and then that the method is timed as expected.
*/
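Concretely, the callback pairs with a Timer like this (a sketch of the class described by the Javadoc above; the generics follow the com.stumbleupon.async Callback interface, which passes the result through):

    import com.codahale.metrics.Timer;
    import com.stumbleupon.async.Callback;

    // Stops the Timer.Context when the Deferred completes, passing the result through.
    class TimerStoppingCallback<T> implements Callback<T, T> {

        private final Timer.Context context;

        TimerStoppingCallback(final Timer.Context context) {
            this.context = context;
        }

        public T call(final T result) {
            context.stop(); // records the elapsed time for this request
            return result;
        }
    }

    // Typical use around an asynchronous request:
    //   final Timer.Context ctx = timer.time();
    //   client.get(request).addBoth(new TimerStoppingCallback<Object>(ctx));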
diff --git a/dropwizard-extra-kafka/pom.xml b/dropwizard-extra-kafka/pom.xml
index 5a54271..87fe9c3 100644
--- a/dropwizard-extra-kafka/pom.xml
+++ b/dropwizard-extra-kafka/pom.xml
@@ -5,7 +5,7 @@
* The {@link KafkaConsumer} implementation will be determined by the configuration used to create
* it.
- * <p/>
+ * <p>
* The resultant {@link KafkaConsumer} will have its lifecycle managed by the {@link Environment}
* and will have {@link com.codahale.metrics.health.HealthCheck}s installed to monitor its status.
*/
@@ -42,7 +42,7 @@ public class KafkaConsumerFactory extends KafkaClientFactory {
/**
* A description of the initial offset to consume from a partition when no committed offset
* exists.
- * <p/>
+ * <p>
*
* Topics not referenced will not be consumed from.
*
* @return a Map of topics to the number of partitions to consume from them.
@@ -162,7 +162,7 @@ public Map
* Topics not referenced will not be consumed from.
*
* @param partitions a Map of topics to the number of partitions to consume from them.
@@ -175,7 +175,7 @@ public void setPartitions(final Map
* When a {@link KafkaConsumer} times out a stream, a {@link
 * kafka.consumer.ConsumerTimeoutException} will be thrown by that stream's {@link
 * kafka.consumer.ConsumerIterator}.
@@ -194,7 +194,7 @@ public Duration getTimeout() {
 /**
  * Sets the time the {@link KafkaConsumer} should wait to receive messages before timing out
  * the stream.
- * <p/>
+ * <p>
 * When a {@link KafkaConsumer} times out a stream, a {@link
 * kafka.consumer.ConsumerTimeoutException} will be thrown by that stream's {@link
* kafka.consumer.ConsumerIterator}.
@@ -230,7 +230,7 @@ public void setReceiveBufferSize(final Size size) {
/**
* Returns the maximum size of a batch of messages to fetch in a single request.
- * <p/>
+ * <p>
* This dictates the maximum size of a message that may be received by the {@link
* KafkaConsumer}. Messages larger than this size will cause a {@link
* kafka.common.InvalidMessageSizeException} to be thrown during iteration of the stream.
@@ -246,7 +246,7 @@ public Size getFetchSize() {
/**
* Sets the maximum size of a batch of messages to fetch in a single request.
- * <p/>
+ * <p>
* This dictates the maximum size of a message that may be received by the {@link
* KafkaConsumer}. Messages larger than this size will cause a {@link
* kafka.common.InvalidMessageSizeException} to be thrown during iteration of the stream.
@@ -262,7 +262,7 @@ public void setFetchSize(final Size size) {
/**
* Returns the cumulative delay before polling a broker again when no data is returned.
- * <p/>
+ * <p>
* When fetching data from a broker, if there is no new data, there will be a delay before
* polling the broker again. This controls the duration of the delay by increasing it linearly,
* on each poll attempt.
@@ -276,7 +276,7 @@ public Duration getBackOffIncrement() {
/**
* Sets the cumulative delay before polling a broker again when no data is returned.
- * <p/>
+ * <p>
* When fetching data from a broker, if there is no new data, there will be a delay before
* polling the broker again. This controls the duration of the delay by increasing it linearly,
* on each poll attempt.
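In other words, the nth consecutive empty poll waits about n times the configured increment (a sketch of the arithmetic only; the names are illustrative, not Kafka's internal code):

    // Cumulative, linearly-increasing delay between empty polls.
    static long backOffDelayMs(final int emptyPolls, final long incrementMs) {
        return emptyPolls * incrementMs; // 1st empty poll: 1x, 2nd: 2x, ...
    }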
@@ -290,12 +290,14 @@ public void setBackOffIncrement(final Duration increment) {
/**
* Returns the maximum number of chunks to queue in internal buffers.
- * <p/>
+ * <p>
 * The consumer internally buffers fetched messages in a set of queues, which are used to
 * iterate the stream. This controls the size of these queues.
- * <p/>
+ * <p>
 * Once a queue has been filled, it will block subsequent attempts to fill it until (some of) it
 * has been iterated.
+ *
+ * @return the maximum number of chunks to queue in internal buffers
*/
@JsonProperty
public int getQueuedChunks() {
@@ -304,12 +306,14 @@ public int getQueuedChunks() {
/**
* Sets the maximum number of chunks to queue in internal buffers.
- * <p/>
+ * <p>
 * The consumer internally buffers fetched messages in a set of queues, which are used to
 * iterate the stream. This controls the size of these queues.
- * <p/>
+ * <p>
 * Once a queue has been filled, it will block subsequent attempts to fill it until (some of) it
 * has been iterated.
+ *
+ * @param maxChunks the maximum number of chunks to queue
*/
@JsonProperty
public void setQueuedChunks(final int maxChunks) {
@@ -342,7 +346,7 @@ public void setAutoCommit(final boolean autoCommit) {
}
/**
- * Sets the frequency to automatically commit previously consumed offsets, if enabled.
+ * Gets the frequency to automatically commit previously consumed offsets, if enabled.
*
* @return the frequency to automatically commit the previously consumed offsets, when enabled.
*
@@ -355,9 +359,9 @@ public Duration getAutoCommitInterval() {
/**
- * Returns the frequency to automatically commit previously consumed offsets, if enabled.
- *
- * @return the frequency to automatically commit the previously consumed offsets, when enabled.
+ * Sets the frequency to automatically commit previously consumed offsets, if enabled.
+ *
+ * @param autoCommitInterval the frequency with which to auto commit.
*
* @see #getAutoCommit
*/
@@ -492,7 +496,7 @@ public KafkaConsumerBuilder
* The decoder instance is used to decode {@link Message}s in the stream before being passed to
* the processor.
*
@@ -546,10 +550,10 @@ public KafkaConsumer build(final Environment environment) {
/**
* Builds a {@link KafkaConsumer} instance from the given {@link ExecutorService} and name,
* for the given {@link Environment}.
- * <p/>
+ * <p>
 * The name is used to identify the returned {@link KafkaConsumer} instance, for example, as
 * the name of its {@link com.codahale.metrics.health.HealthCheck}s, thread pool, etc.
- * <p/>
+ * <p>
* This implementation creates a new {@link ExecutorService} with a fixed-size thread-pool,
* configured for one thread per-partition the {@link KafkaConsumer} is being configured to
* consume.
@@ -578,7 +582,7 @@ public KafkaConsumer build(final Environment environment, final String name) {
/**
* Builds a {@link KafkaConsumer} instance from the given {@link ExecutorService} and name,
* for the given {@link Environment}.
- * <p/>
+ * <p>
* The name is used to identify the returned {@link KafkaConsumer} instance, for example, as
* the name of its {@link com.codahale.metrics.health.HealthCheck}s, etc.
*
@@ -607,7 +611,7 @@ public KafkaConsumer build(final Environment environment,
/**
 * Builds a {@link SynchronousConsumer} instance with this builder's configuration using the
 * given {@link ExecutorService}.
- * <p/>
+ * <p>
* If possible, it's always preferable to use one of the overloads that take an {@link
* Environment} directly. This overload exists for situations where you don't have access to
* an {@link Environment} (e.g. some Commands or unit tests).
diff --git a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaProducerFactory.java b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaProducerFactory.java
index 1ed1c1e..21bc19f 100644
--- a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaProducerFactory.java
+++ b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaProducerFactory.java
@@ -26,10 +26,10 @@
/**
* Configuration for the Kafka producer.
- * <p/>
+ * <p>
 * By default, the producer will be synchronous, blocking the calling thread until the message has
 * been sent.
- * <p/>
+ * <p>
* To use an asynchronous producer, set {@link KafkaProducerFactory#async} with the desired
* properties.
*/
@@ -250,7 +250,7 @@ public Optional
* This {@link StreamProcessor} is instrumented with {@link Metric}s; specifically, a {@link Timer}
* that tracks the time taken to process each message in the stream.
*
diff --git a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java
index e79f9c7..c75c262 100644
--- a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java
+++ b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java
@@ -4,10 +4,10 @@
/**
* Processes an {@link Iterable} of messages of type {@code T}.
- * <p/>
+ * <p>
 * If you wish to process each message individually and iteratively, it's advised that you instead
 * use a {@link MessageProcessor}, as it provides a higher level of abstraction.
- * <p/>
+ * <p>
* Note: since consumers may use multiple threads, it is important that implementations are
* thread-safe.
*/
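A conforming implementation must be safe to invoke from several consumer threads at once; for example (a sketch; the process(...) signature is an assumption based on this Javadoc):

    import java.util.concurrent.atomic.AtomicLong;

    // Hypothetical processor: shares only an atomic counter, so concurrent
    // invocation from multiple consumer threads is safe.
    public class CountingProcessor implements StreamProcessor<byte[]> {

        private final AtomicLong processed = new AtomicLong();

        @Override
        public void process(final Iterable<byte[]> stream) {
            for (final byte[] message : stream) {
                processed.incrementAndGet();
            }
        }
    }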
diff --git a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java
index 67cb32c..b65bcb5 100644
--- a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java
+++ b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java
@@ -71,11 +71,17 @@ public void run() {
* Creates a {@link SynchronousConsumer} to process a stream.
*
* @param connector the {@link ConsumerConnector} of the underlying consumer.
- * @param partitions a mapping of the topic -> partitions to consume.
+ * @param partitions a mapping of topics to the number of partitions to consume.
* @param keyDecoder a {@link Decoder} for decoding the key of each message before being processed.
* @param valueDecoder a {@link Decoder} for decoding each message before being processed.
* @param processor a {@link StreamProcessor} for processing messages.
* @param executor the {@link ExecutorService} to process the stream with.
+ * @param initialRecoveryDelay the initial recovery delay
+ * @param maxRecoveryDelay the max recovery delay
+ * @param retryResetDelay the delay after which the count of recovery attempts is reset
+ * @param maxRecoveryAttempts the number of times to attempt recovery
+ * @param shutdownOnFatal whether to shut down on a fatal error
+ * @param startDelay the amount of time to delay at start
*/
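Taken together, the recovery parameters suggest a back-off loop of roughly this shape (a sketch of the delay arithmetic only; the parameter names come from the @param list above, the doubling policy is an assumption):

    import io.dropwizard.util.Duration;

    // Delay before the nth recovery attempt: start at initialRecoveryDelay and
    // double each time, never exceeding maxRecoveryDelay.
    static long recoveryDelayMs(final int attempt,
                                final Duration initialRecoveryDelay,
                                final Duration maxRecoveryDelay) {
        final long initial = initialRecoveryDelay.toMilliseconds();
        final long max = maxRecoveryDelay.toMilliseconds();
        final long doubled = initial << Math.min(attempt, 30); // cap the shift to avoid overflow
        return Math.min(doubled, max);
    }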
public SynchronousConsumer(final ConsumerConnector connector,
final Map
* The consumer will immediately begin consuming from the configured topics using the configured
* {@link Decoder} to decode messages and {@link StreamProcessor} to process the decoded
* messages.
- * <p/>
+ * <p>
* Each partition will be consumed using a separate thread.
*
* @throws Exception if an error occurs starting the consumer
@@ -152,7 +158,7 @@ public void start() throws Exception {
/**
* Stops this {@link SynchronousConsumer} immediately.
*
- * @throws Exception
+ * @throws Exception if an error occurs on stop
*/
@Override
public void stop() throws Exception {
@@ -201,10 +207,10 @@ public StreamProcessorRunnable(final String topic, final KafkaStream
 * If an {@link Exception} thrown during processing is deemed recoverable,
 * the stream will continue to be consumed.
- * <p/>
+ * <p>
* Unrecoverable {@link Exception}s will cause the consumer to shut down completely.
*/
@Override
diff --git a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java
index c636599..e302667 100644
--- a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java
+++ b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java
@@ -9,10 +9,10 @@
/**
* A utility for parsing {@link CompressionCodec}s from a {@link
* io.dropwizard.Configuration}.
- * <p/>
+ * <p>
 * To create {@link Compression} instances, use {@link Compression#parse(String)} to parse an
 * instance from a {@link String}.
- * <p/>
+ * <p>
* This is provided to parse textual specifications of a {@link CompressionCodec}, for example in a
* {@link io.dropwizard.Configuration}.
*/
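Usage is then a one-liner (a sketch; parse(String) is documented above, the getCodec() accessor is an assumption):

    import kafka.message.CompressionCodec;

    // Turn a textual codec name from configuration into a Kafka CompressionCodec.
    static CompressionCodec codecFor(final String name) {
        return Compression.parse(name).getCodec(); // getCodec() is an assumed accessor
    }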
@@ -22,9 +22,9 @@ public class Compression {
/**
* Creates a {@link Compression} instance for the given codec type.
- * <p/>
+ * <p>
 * The valid codec values are defined by {@link CompressionCodec}.
- * <p/>
+ * <p>
* To create {@link Compression} instances, use the {@link Compression#parse(String)} factory
* method to parse an instance from a {@link String}.
*
diff --git a/dropwizard-extra-kafka7/pom.xml b/dropwizard-extra-kafka7/pom.xml
index 1c5f584..7e1e53c 100644
--- a/dropwizard-extra-kafka7/pom.xml
+++ b/dropwizard-extra-kafka7/pom.xml
@@ -5,7 +5,7 @@
* The {@link KafkaConsumer} implementation will be determined by the configuration used to create
* it.
- * <p/>
+ * <p>
* The resultant {@link KafkaConsumer} will have its lifecycle managed by the {@link Environment}
* and will have {@link com.codahale.metrics.health.HealthCheck}s installed to monitor its status.
*/
@@ -41,7 +41,7 @@ public class KafkaConsumerFactory extends KafkaClientFactory {
/**
* A description of the initial offset to consume from a partition when no committed offset
* exists.
- * <p/>
+ * <p>
*
* Topics not referenced will not be consumed from.
*
* @return a Map of topics to the number of partitions to consume from them.
@@ -161,7 +161,7 @@ public Map
* Topics not referenced will not be consumed from.
*
* @param partitions a Map of topics to the number of partitions to consume from them.
@@ -174,7 +174,7 @@ public void setPartitions(final Map
* When a {@link KafkaConsumer} times out a stream, a {@link
 * kafka.consumer.ConsumerTimeoutException} will be thrown by that stream's {@link
 * kafka.consumer.ConsumerIterator}.
@@ -193,7 +193,7 @@ public Duration getTimeout() {
 /**
  * Sets the time the {@link KafkaConsumer} should wait to receive messages before timing out
  * the stream.
- * <p/>
+ * <p>
 * When a {@link KafkaConsumer} times out a stream, a {@link
 * kafka.consumer.ConsumerTimeoutException} will be thrown by that stream's {@link
* kafka.consumer.ConsumerIterator}.
@@ -229,7 +229,7 @@ public void setReceiveBufferSize(final Size size) {
/**
* Returns the maximum size of a batch of messages to fetch in a single request.
- * <p/>
+ * <p>
* This dictates the maximum size of a message that may be received by the {@link
* KafkaConsumer}. Messages larger than this size will cause a {@link
* kafka.common.InvalidMessageSizeException} to be thrown during iteration of the stream.
@@ -245,7 +245,7 @@ public Size getFetchSize() {
/**
* Sets the maximum size of a batch of messages to fetch in a single request.
- * <p/>
+ * <p>
* This dictates the maximum size of a message that may be received by the {@link
* KafkaConsumer}. Messages larger than this size will cause a {@link
* kafka.common.InvalidMessageSizeException} to be thrown during iteration of the stream.
@@ -261,7 +261,7 @@ public void setFetchSize(final Size size) {
/**
* Returns the cumulative delay before polling a broker again when no data is returned.
- * <p/>
+ * <p>
* When fetching data from a broker, if there is no new data, there will be a delay before
* polling the broker again. This controls the duration of the delay by increasing it linearly,
* on each poll attempt.
@@ -275,7 +275,7 @@ public Duration getBackOffIncrement() {
/**
* Sets the cumulative delay before polling a broker again when no data is returned.
- * <p/>
+ * <p>
* When fetching data from a broker, if there is no new data, there will be a delay before
* polling the broker again. This controls the duration of the delay by increasing it linearly,
* on each poll attempt.
@@ -289,10 +289,10 @@ public void setBackOffIncrement(final Duration increment) {
/**
* Returns the maximum number of chunks to queue in internal buffers.
- * <p/>
+ * <p>
 * The consumer internally buffers fetched messages in a set of queues, which are used to
 * iterate the stream. This controls the size of these queues.
- * <p/>
+ * <p>
 * Once a queue has been filled, it will block subsequent attempts to fill it until (some of) it
 * has been iterated.
*/
@@ -303,10 +303,10 @@ public int getQueuedChunks() {
/**
* Sets the maximum number of chunks to queue in internal buffers.
- * <p/>
+ * <p>
 * The consumer internally buffers fetched messages in a set of queues, which are used to
 * iterate the stream. This controls the size of these queues.
- * <p/>
+ * <p>
 * Once a queue has been filled, it will block subsequent attempts to fill it until (some of) it
 * has been iterated.
*/
@@ -491,7 +491,7 @@ public KafkaConsumerBuilder
* The decoder instance is used to decode {@link Message}s in the stream before being passed to
* the processor.
*
@@ -536,10 +536,10 @@ public KafkaConsumer build(final Environment environment) {
/**
* Builds a {@link KafkaConsumer} instance from the given {@link ExecutorService} and name,
* for the given {@link Environment}.
- * <p/>
+ * <p>
 * The name is used to identify the returned {@link KafkaConsumer} instance, for example, as
 * the name of its {@link com.codahale.metrics.health.HealthCheck}s, thread pool, etc.
- * <p/>
+ * <p>
* This implementation creates a new {@link ExecutorService} with a fixed-size thread-pool,
* configured for one thread per-partition the {@link KafkaConsumer} is being configured to
* consume.
@@ -568,7 +568,7 @@ public KafkaConsumer build(final Environment environment, final String name) {
/**
* Builds a {@link KafkaConsumer} instance from the given {@link ExecutorService} and name,
* for the given {@link Environment}.
- * <p/>
+ * <p>
* The name is used to identify the returned {@link KafkaConsumer} instance, for example, as
* the name of its {@link com.codahale.metrics.health.HealthCheck}s, etc.
*
@@ -597,7 +597,7 @@ public KafkaConsumer build(final Environment environment,
/**
 * Builds a {@link SynchronousConsumer} instance with this builder's configuration using the
 * given {@link ExecutorService}.
- * <p/>
+ * <p>
* If possible, it's always preferable to use one of the overloads that take an {@link
* Environment} directly. This overload exists for situations where you don't have access to
* an {@link Environment} (e.g. some Commands or unit tests).
diff --git a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java
index 9b48b71..fbb60ed 100644
--- a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java
+++ b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java
@@ -6,7 +6,7 @@
/**
* Processes messages of type {@code T} from a Kafka message stream.
- * <p/>
+ * <p>
* This {@link StreamProcessor} is instrumented with {@link Metric}s; specifically, a {@link Timer}
* that tracks the time taken to process each message in the stream.
*
diff --git a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java
index 06698cd..b618eb2 100644
--- a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java
+++ b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java
@@ -4,10 +4,10 @@
/**
* Processes an {@link Iterable} of messages of type {@code T}.
- * <p/>
+ * <p>
 * If you wish to process each message individually and iteratively, it's advised that you instead
 * use a {@link MessageProcessor}, as it provides a higher level of abstraction.
- * <p/>
+ * <p>
* Note: since consumers may use multiple threads, it is important that implementations are
* thread-safe.
*/
diff --git a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java
index 45b9e59..c4081b7 100644
--- a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java
+++ b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java
@@ -116,11 +116,11 @@ public void serverStarted(final Server server) {
/**
* Starts this {@link SynchronousConsumer} immediately.
- * <p/>
+ * <p>
 * The consumer will immediately begin consuming from the configured topics using the configured
 * {@link Decoder} to decode messages and {@link StreamProcessor} to process the decoded
 * messages.
- * <p/>
+ * <p>
* Each partition will be consumed using a separate thread.
*
* @throws Exception if an error occurs starting the consumer
@@ -197,10 +197,10 @@ public StreamProcessorRunnable(final String topic, final KafkaStream
 * If an {@link Exception} thrown during processing is deemed recoverable,
 * the stream will continue to be consumed.
- * <p/>
+ * <p>
* Unrecoverable {@link Exception}s will cause the consumer to shut down completely.
*/
@Override
diff --git a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java
index c636599..e302667 100644
--- a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java
+++ b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java
@@ -9,10 +9,10 @@
/**
* A utility for parsing {@link CompressionCodec}s from a {@link
* io.dropwizard.Configuration}.
- * <p/>
+ * <p>
 * To create {@link Compression} instances, use {@link Compression#parse(String)} to parse an
 * instance from a {@link String}.
- * <p/>
+ * <p>
* This is provided to parse textual specifications of a {@link CompressionCodec}, for example in a
* {@link io.dropwizard.Configuration}.
*/
@@ -22,9 +22,9 @@ public class Compression {
/**
* Creates a {@link Compression} instance for the given codec type.
- * <p/>
+ * <p>
 * The valid codec values are defined by {@link CompressionCodec}.
- * <p/>
+ * <p>
* To create {@link Compression} instances, use the {@link Compression#parse(String)} factory
* method to parse an instance from a {@link String}.
*
diff --git a/dropwizard-extra-util/pom.xml b/dropwizard-extra-util/pom.xml
index 1443075..4753f87 100644
--- a/dropwizard-extra-util/pom.xml
+++ b/dropwizard-extra-util/pom.xml
@@ -5,7 +5,7 @@
* Use this as a basis for {@link HealthCheck}s for remote services, such as databases or
* web-services.
*/
@@ -48,7 +48,7 @@ public SocketHealthCheck(final String hostname, final int port) {
/**
* Generates a String representation of the remote socket being checked.
- * <p/>
+ * <p>
* This will be the socket address formatted as: hostname:port
*
* @return the String representation of the remote socket being checked.
@@ -91,7 +91,7 @@ protected Socket createSocket(final String hostname, final int port) throws IOEx
/**
* Perform a check of a {@link Socket}.
- * <p/>
+ * <p>
* Implementations can assume that the {@link Socket} is already connected.
*
* @param socket the {@link Socket} to check the health of
@@ -99,8 +99,6 @@ protected Socket createSocket(final String hostname, final int port) throws IOEx
* @return if the component is healthy, a healthy {@link Result}; otherwise, an unhealthy {@link
* Result} with a description of the error or exception
*
- * @throws Exception if there is an unhandled error during the health check; this will result in
- * a failed health check
*/
protected abstract Result check(Socket socket);
}
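A concrete subclass then only supplies the protocol-level probe; for example (a sketch; the constructor signature comes from the hunk above, the trivial liveness check is invented for illustration):

    import com.codahale.metrics.health.HealthCheck.Result;
    import java.net.Socket;

    // Hypothetical check: healthy if the already-connected socket is still open.
    public class PingSocketHealthCheck extends SocketHealthCheck {

        public PingSocketHealthCheck(final String hostname, final int port) {
            super(hostname, port);
        }

        @Override
        protected Result check(final Socket socket) {
            return socket.isConnected() && !socket.isClosed()
                    ? Result.healthy()
                    : Result.unhealthy(this + " is not connected"); // toString() is "hostname:port"
        }
    }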
diff --git a/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Classes.java b/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Classes.java
index 4495433..00da3a1 100644
--- a/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Classes.java
+++ b/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Classes.java
@@ -12,7 +12,7 @@ public class Classes {
/**
* Creates a new instance of the given {@link Class}, using the given arguments.
- * <p/>
+ * <p>
* A new instance object of the given {@link Class} is created, using reflection,
* providing the given arguments to the constructor.
*
@@ -45,8 +45,8 @@ public static
- * Given an object of type T, a new instance of {@link Class<T>} will be created,
+ * Given an object of type T, a new instance of {@link Class} will be created,
 * passing the given args to the constructor.
 * A new instance object of the given {@link Class} is created, using reflection,
 * providing the given arguments to the constructor.
 *
 * @param template an object that provides the {@link Class} to instantiate.
- * <p/>
+ * <p>
* The visibility of the {@link Constructor} defined by the arguments is ignored and a new
* instance created irrespective of the defined visibility. This is potentially dangerous,
* as the API likely makes no guarantee as to the behaviour when instantiating from a non-public
@@ -80,10 +80,10 @@ public static
- * Given an object of type T, a new instance of {@link Class<T>} will be created,
+ * Given an object of type T, a new instance of {@link Class} will be created,
 * passing the given args to the constructor.
@@ -124,10 +124,10 @@ public static
- * Given an object of type T, a new instance of {@link Class<T>} will be created,
+ * Given an object of type T, a new instance of {@link Class} will be created,
 * passing the given args to the constructor.
* The visibility of the {@link Constructor} defined by the arguments is ignored and a new
* instance created irrespective of the defined visibility. This is potentially dangerous,
* as the API likely makes no guarantee as to the behaviour when instantiating from a non-public
@@ -161,11 +161,11 @@ public static
* The resulting array of {@link Class} objects that are ordered in parallel with the argument
* list that produced it. This is especially useful for getting a {@link Constructor} for a
* given set of arguments:
- * <p/>
+ * <p>
 *     clazz.getConstructor(Classes.of("abc", 123));
 *
@@ -184,16 +184,16 @@ public static Class[] of(final Object... arguments) {
 /**
  * Ensures a variable argument list has been properly passed.
- * <p/>
+ * <p>
 * Sometimes, you want to pass a single array-typed argument to a method that accepts
 * variable arguments. In these situations, that array will be unwrapped into a list of
 * multiple arguments, instead of a single argument that is an array.
- * <p/>
+ * <p>
 * Example:
 *     Classes.of(TableNotFoundException.class, tableName.getBytes());
 *
* Resolving will differentiate a variable argument list from a single argument of the
* following types:
 *
@@ -217,14 +217,14 @@ public static Object[] resolveVarArgs(final Object... args) {
 /**
  * Gets a {@link Constructor} from the given {@link Class} that is applicable to the given
  * arguments.
- * <p/>
+ * <p>
* If the types of the given arguments are not an exact match for any declared {@link
* Constructor}s, a Constructor that will accept the arguments (e.g. because they are
* sub-types of the parameters) will be searched for.
- * <p/>
+ * <p>
 * If the given {@link Class} has no {@link Constructor} that is applicable to the given
 * arguments, a {@link NoSuchMethodException} will be thrown.
- * <p/>
+ * <p>
* No guarantees are made about the visibility of the {@link Constructor} returned; it may not
* be accessible from the calling scope.
*
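The of(...) helper combines naturally with reflective look-ups (a usage sketch; of(...) is shown in the Javadoc examples above, everything else uses the standard JDK reflection API):

    import java.lang.reflect.Constructor;

    // Resolve and invoke a constructor matching a concrete argument list.
    static Object construct(final Class<?> clazz, final Object... args) throws Exception {
        final Constructor<?> ctor = clazz.getConstructor(Classes.of(args)); // exact-match lookup
        return ctor.newInstance(args);
    }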
@@ -254,14 +254,14 @@ public static
* If the types of the given arguments are not an exact match for any declared {@link Method}s,
* a {@link Method} that will accept the arguments (e.g. because they are sub-types of the
* parameters) will be searched for.
- * <p/>
+ * <p>
 * If the given {@link Class} has no {@link Method} with the given name that is applicable to
 * the given arguments, a {@link NoSuchMethodException} will be thrown.
- * <p/>
+ * <p>
* No guarantees are made about the visibility of the {@link Method} returned; it may not be
* accessible from the calling scope. Similarly, the {@link Method} may be static or may be an
* instance method.
@@ -291,7 +291,7 @@ public static Method getApplicableMethod(final Class clazz,
/**
* Determines if the {@link Class}s for the given target types can be assigned to from the
* types represented by the given source {@link Class}s.
- * <p/>
+ * <p>
* Each target type will be matched up with the corresponding source type. If the length of the
* two arrays differ, they are considered not assignable.
*
@@ -324,7 +324,7 @@ public static boolean isAssignableFrom(final Class[] targets, final Class[] sour
/***
* Determines if the given source {@link Class} represents a type that can be assigned to the
* given target {@link Class}.
- * <p/>
+ * <p>
* If the source {@link Class} represents null (via {@link Null}), it can be assigned to any
* {@link Class} that represents a reference type.
*
@@ -345,7 +345,7 @@ public static boolean isAssignableFrom(final Class> target, final Class> sou
/**
* A placeholder Null type.
- * <p/>
+ * <p>
* This Null type provides a means to get a {@link Class} for null values. This should never be
* used except for special-casing nulls.
*/
diff --git a/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Exceptions.java b/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Exceptions.java
index f8ca8a1..02e2be7 100644
--- a/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Exceptions.java
+++ b/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Exceptions.java
@@ -7,7 +7,7 @@ public class Exceptions {
/**
* Creates a new {@link Exception} instance from the given {@link Class}, using the given args.
- * <p/>
+ * <p>
* A new {@link Exception} instance object of the given {@link Class} is created, using
* reflection, providing the given arguments to the constructor.
*
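In practice this is a thin wrapper over the same reflective construction used in Classes (a sketch; the method name and generic signature are assumptions):

    import com.datasift.dropwizard.util.Classes;

    // Hypothetical shape of the reflective Exception factory described above.
    static <E extends Exception> E newException(final Class<E> clazz, final Object... args)
            throws Exception {
        return clazz.getConstructor(Classes.of(args)).newInstance(args);
    }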
@@ -28,7 +28,7 @@ public static
* A new {@link Exception} instance object of the given {@link Class} is created, using
* reflection, providing the given arguments to the constructor.
*
@@ -50,10 +50,10 @@ public static
* A new {@link Exception} instance object of the given {@link Class} is created, using
* reflection, providing the given arguments to the constructor.
- * <p/>
+ * <p>
* The visibility of the constructor defined by the arguments is ignored and a new instance
* created irrespective of the defined visibility. This is potentially dangerous, as the API
* likely makes no guarantee as to the behaviour when instantiating from a non-public
@@ -77,10 +77,10 @@ public static
* A new {@link Exception} instance object of the given {@link Class} is created, using
* reflection, providing the given arguments to the constructor.
- * <p/>
+ * <p>
* The visibility of the constructor defined by the arguments is ignored and a new instance
* created irrespective of the defined visibility. This is potentially dangerous, as the API
* likely makes no guarantee as to the behaviour when instantiating from a non-public
diff --git a/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Primitives.java b/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Primitives.java
index 5c42485..fa525b0 100644
--- a/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Primitives.java
+++ b/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Primitives.java
@@ -10,18 +10,18 @@
/**
* Utilities for working with primitive types and reflection.
- * <p/>
+ * <p>
 * Terminology:
- * <p/>
+ * <p>
 * Non-reference primitive types (e.g. int, double, void) are considered native primitives,
 * or sometimes, unboxed primitives.
 * Reference primitive types (e.g. {@link Integer}, {@link Double}, {@link Void}) are considered
 * boxed primitives.
- * <p/>
+ * <p>
 * Conversion between native primitives and boxed primitives can be done with {@link
 * Primitives#box(Class)} and {@link Primitives#unbox(Class)}.
- * <p/>
+ * <p>
* Whenever possible, boxing/unboxing will be implicit and transparent, with a preference for
* native primitive types.
*/
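The documented mapping in a nutshell (usage sketch; only box(Class) and unbox(Class) are named above, the assertions describe their expected behaviour):

    // Native <-> boxed round-trips.
    static void demo() {
        assert Primitives.box(int.class) == Integer.class;
        assert Primitives.unbox(Integer.class) == int.class;
        assert Primitives.box(void.class) == Void.class;
    }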
@@ -89,7 +89,7 @@ public static Class unbox(final Class clazz) {
/**
* Determines whether the objects of the given source {@link Class} can be assigned to the
* primitive type of the given target {@link Class}.
- * <p/>
+ * <p>
* If either type is a boxed-primitive, it will be unboxed automatically; all comparisons
* will be of the native primitive types.
*
@@ -124,7 +124,7 @@ public static boolean isAssignableFrom(final Class target, final Class source) {
/**
* Determines whether the given {@link Class} is for a primitive type; either native or boxed.
- * <p/>
+ * <p>
 * Both boxed and native primitive types are considered "primitives". Example:
 *     Primitives.isPrimitive(int.class) == true;
@@ -143,7 +143,7 @@ public static boolean isPrimitive(final Class clazz) {
 /**
  * Determines whether the given {@link Class} is for a boxed primitive type.
- * <p/>
+ * <p>
 * Only boxed primitive types are accepted. Example:
 *     Primitives.isPrimitive(int.class) == false;
@@ -162,7 +162,7 @@ public static boolean isBoxedPrimitive(final Class clazz) {
 /**
  * Determines whether the given {@link Class} is for a native primitive type.
- * <p/>
+ * <p>
 * Only native primitive types are accepted. Example:
 *     Primitives.isPrimitive(int.class) == true;
diff --git a/dropwizard-extra-zookeeper/pom.xml b/dropwizard-extra-zookeeper/pom.xml
index 9fd24a3..bfa1729 100644
--- a/dropwizard-extra-zookeeper/pom.xml
+++ b/dropwizard-extra-zookeeper/pom.xml
@@ -5,7 +5,7 @@
* A {@link ZooKeeperHealthCheck} will be registered for each {@link ZooKeeper} client instance that
* checks for the existence of the configured {@link #namespace}.
*
@@ -63,7 +63,7 @@ public void setScheme(final String scheme) {
/**
* Returns the authorization id to use.
- * <p/>
+ * <p>
* This is dependent on the authorization {@link #getScheme() scheme} being used.
*
* @return the scheme-specific authorization id.
@@ -77,7 +77,7 @@ public String getId() {
/**
* Sets the authorization id to use.
- * <p/>
+ * <p>
* This is dependent on the authorization {@link #getScheme() scheme} being used.
*
* @param id the scheme-specific authorization id.
@@ -218,7 +218,7 @@ public void setSessionTimeout(final Duration timeout) {
/**
* Returns the namespace to prepend to all paths accessed by the ZooKeeper client.
- * <p/>
+ * <p>
* Since ZooKeeper is a shared space, this is a useful way to localise a service to a namespace.
*
* @return the namespace to prepend to all paths accessed by the ZooKeeper client.
@@ -230,7 +230,7 @@ public String getNamespace() {
/**
* Sets the namespace to prepend to all paths accessed by the ZooKeeper client.
- * <p/>
+ * <p>
* Since ZooKeeper is a shared space, this is a useful way to localise a service to a namespace.
*
* @param namespace the namespace to prepend to all paths accessed by the ZooKeeper client.
@@ -242,7 +242,7 @@ public void setNamespace(final String namespace) {
/**
* Returns whether or not this client can connect to read-only ZooKeeper instances.
- * <p/>
+ * <p>
* During a network partition, some or all nodes in the quorum may be in a read-only state. This
* controls whether the client may enter read-only mode during a network partition.
*
@@ -255,7 +255,7 @@ public boolean isReadOnly() {
/**
* Sets whether or not this client can connect to read-only ZooKeeper instances.
- * <p/>
+ * <p>
* During a network partition, some or all nodes in the quorum may be in a read-only state. This
* controls whether the client may enter read-only mode during a network partition.
*
@@ -268,7 +268,7 @@ public void isReadOnly(final boolean readOnly) {
/**
 * Retrieves a formatted specification of the ZooKeeper quorum.
- * <p/>
+ * <p>
* The specification is formatted as: host1:port,host2:port[,hostN:port]
*
* @return a specification of the ZooKeeper quorum, formatted as a String
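The format above is a simple comma-join of host:port pairs (a sketch; the hosts/port configuration fields are assumptions):

    // "host1:port,host2:port[,hostN:port]"
    static String quorumSpec(final String[] hosts, final int port) {
        final StringBuilder spec = new StringBuilder();
        for (final String host : hosts) {
            if (spec.length() > 0) {
                spec.append(',');
            }
            spec.append(host).append(':').append(port);
        }
        return spec.toString();
    }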
@@ -285,7 +285,7 @@ public String getQuorumSpec() {
/**
* Validates that the ZooKeeper client namespace is a valid ZNode.
- * <p/>
+ * <p>
* Note: this validation doesn't ensure that the ZNode exists, just that it is valid.
*
* @return true if the namespace is a valid ZNode; false if it is not.
@@ -302,7 +302,7 @@ public boolean isNamespaceValid() {
/**
 * Builds a default {@link ZooKeeper} instance.
- * <p/>
+ * <p>
* No {@link Watcher} will be configured for the built {@link ZooKeeper} instance. If you wish
* to watch all events on the {@link ZooKeeper} client, use {@link #build(Environment, Watcher)}.
*
@@ -319,7 +319,7 @@ public ZooKeeper build(final Environment environment) throws IOException {
/**
* Builds a default {@link ZooKeeper} instance.
- * <p/>
+ * <p>
* The given {@link Watcher} will be assigned to watch for all events on the {@link ZooKeeper}
* client instance. If you wish to ignore events, use {@link #build(Environment)}.
*
@@ -338,7 +338,7 @@ public ZooKeeper build(final Environment environment, final Watcher watcher)
/**
* Builds a named {@link ZooKeeper} instance.
- * <p/>
+ * <p>
* No {@link Watcher} will be configured for the built {@link ZooKeeper} instance. If you wish
* to watch all events on the {@link ZooKeeper} client, use {@link
* #build(Environment, Watcher, String)}.
@@ -358,7 +358,7 @@ public ZooKeeper build(final Environment environment, final String name)
/**
* Builds a named {@link ZooKeeper} instance.
- * <p/>
+ * <p>
* The given {@link Watcher} will be assigned to watch for all events on the {@link ZooKeeper}
* client instance. If you wish to ignore events, use {@link #build(Environment, String)}.
*
diff --git a/dropwizard-extra-zookeeper/src/main/java/com/datasift/dropwizard/zookeeper/health/ZooKeeperHealthCheck.java b/dropwizard-extra-zookeeper/src/main/java/com/datasift/dropwizard/zookeeper/health/ZooKeeperHealthCheck.java
index 488a720..baffbb7 100644
--- a/dropwizard-extra-zookeeper/src/main/java/com/datasift/dropwizard/zookeeper/health/ZooKeeperHealthCheck.java
+++ b/dropwizard-extra-zookeeper/src/main/java/com/datasift/dropwizard/zookeeper/health/ZooKeeperHealthCheck.java
@@ -5,7 +5,7 @@
/**
* A {@link HealthCheck} for a ZooKeeper ensemble.
- * <p/>
+ * <p>
 * Checks that the configured {@link #namespace} exists.
 *
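In ZooKeeper API terms the check reduces to an existence probe on the namespace znode (a sketch; exists(String, boolean) is the standard org.apache.zookeeper.ZooKeeper call):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    // Healthy iff the configured namespace znode exists (no watch is set).
    static boolean namespaceExists(final ZooKeeper zooKeeper, final String namespace)
            throws KeeperException, InterruptedException {
        return zooKeeper.exists(namespace, false) != null;
    }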