diff --git a/dropwizard-extra-curator/pom.xml b/dropwizard-extra-curator/pom.xml index b43c124..dbadc72 100644 --- a/dropwizard-extra-curator/pom.xml +++ b/dropwizard-extra-curator/pom.xml @@ -5,7 +5,7 @@ com.datasift.dropwizard dropwizard-extra - 0.7.1-2-SNAPSHOT + 0.9.1-1-SNAPSHOT ../pom.xml diff --git a/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/CuratorFactory.java b/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/CuratorFactory.java index c0ef172..b7c3bf5 100644 --- a/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/CuratorFactory.java +++ b/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/CuratorFactory.java @@ -21,7 +21,7 @@ /** * A factory for creating and managing {@link CuratorFramework} instances. - *
<p/>
+ * <p>
* The resulting {@link CuratorFramework} will have its lifecycle managed by the {@link Environment} * and will have {@link com.codahale.metrics.health.HealthCheck}s installed for the underlying ZooKeeper * ensemble. @@ -118,7 +118,7 @@ public void setMaxRetries(final int maxRetries) { /** * Returns the initial time to wait before retrying a failed connection. - *
<p/>
+ * <p>
* Subsequent retries will wait an exponential amount of time more than this. * * @return the initial time to wait before trying to connect again. @@ -130,7 +130,7 @@ public Duration getBackOffBaseTime() { /** * Sets the initial time to wait before retrying a failed connection. - *
<p/>
+ * <p>
* Subsequent retries will wait an exponential amount of time more than this. * * @param backOffBaseTime the initial time to wait before trying to connect again. @@ -142,7 +142,7 @@ public void setBackOffBaseTime(final Duration backOffBaseTime) { /** * Returns a {@link RetryPolicy} for handling failed connection attempts. - *
<p/>
+ * <p>
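As context for the retry-policy construction described in the next sentence, this is roughly how such a factory method assembles the policy from the two configured settings. A minimal sketch with assumed field names and defaults, not code from this patch:

    import io.dropwizard.util.Duration;
    import org.apache.curator.RetryPolicy;
    import org.apache.curator.retry.ExponentialBackoffRetry;

    class RetryPolicySketch {
        private final int maxRetries = 3;                              // hypothetical defaults
        private final Duration backOffBaseTime = Duration.seconds(1);

        // ExponentialBackoffRetry(baseSleepTimeMs, maxRetries): each retry waits
        // roughly baseSleepTimeMs * random(1 .. 2^retryCount) milliseconds.
        RetryPolicy getRetryPolicy() {
            return new ExponentialBackoffRetry((int) backOffBaseTime.toMilliseconds(), maxRetries);
        }
    }
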
* Always configures an {@link ExponentialBackoffRetry} based on the {@link #getMaxRetries() * maximum retries} and {@link #getBackOffBaseTime() initial back-off} configured. * diff --git a/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ensemble/DropwizardConfiguredZooKeeperFactory.java b/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ensemble/DropwizardConfiguredZooKeeperFactory.java index 1ed5879..75bc28f 100644 --- a/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ensemble/DropwizardConfiguredZooKeeperFactory.java +++ b/dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ensemble/DropwizardConfiguredZooKeeperFactory.java @@ -12,7 +12,7 @@ /** * Provides integration for Dropwizard's ZooKeeper functionality with Curator. - *
<p/>
+ * <p>
* This ensures that {@link ZooKeeper} instances created by Curator integrate properly with the * Dropwizard application life-cycle. */ diff --git a/dropwizard-extra-hbase/pom.xml b/dropwizard-extra-hbase/pom.xml index bd92ee4..c39a96d 100644 --- a/dropwizard-extra-hbase/pom.xml +++ b/dropwizard-extra-hbase/pom.xml @@ -5,7 +5,7 @@ com.datasift.dropwizard dropwizard-extra - 0.7.1-2-SNAPSHOT + 0.9.1-1-SNAPSHOT ../pom.xml @@ -30,7 +30,7 @@ org.hbase asynchbase - 1.4.1 + 1.7.0 diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/BoundedHBaseClient.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/BoundedHBaseClient.java index c4c989b..6102e20 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/BoundedHBaseClient.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/BoundedHBaseClient.java @@ -14,17 +14,17 @@ /** * An {@link HBaseClient} that constrains the maximum number of concurrent asynchronous requests. - *
<p/>
+ * <p>
* This client places an upper-bounds on the number of concurrent asynchronous requests awaiting * completion. When this limit is reached, subsequent requests will block until an existing request * completes. - *
<p/>
+ * <p>
* This behaviour is particularly useful for throttling high-throughput applications where HBase is * the bottle-neck. Without backing-off, such an application may run out of memory. By constraining * the maximum number of requests to a sufficiently high limit, but low enough so that it can be * reached without running out of memory, such applications can organically throttle and back-off * their requests. - *
<p/>
+ * <p>
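The throttling just described — together with the non-fair Semaphore book-keeping noted in the next paragraph — reduces to taking a permit per request and releasing it when the request's Deferred fires. A rough sketch of the pattern with invented names; the class's real implementation may differ:

    import java.util.concurrent.Semaphore;
    import com.stumbleupon.async.Callback;
    import com.stumbleupon.async.Deferred;

    class BoundingSketch {
        // non-fair: favours throughput over FIFO ordering of blocked callers
        private final Semaphore requests = new Semaphore(1000, false);

        <T> Deferred<T> bounded(final Deferred<T> request) throws InterruptedException {
            requests.acquire(); // blocks once the limit is reached; in practice taken before issuing
            return request.addBoth(new Callback<T, T>() {
                public T call(final T result) {
                    requests.release(); // completion or failure frees a slot
                    return result;
                }
            });
        }
    }
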
* Book-keeping of in-flight requests is done using a {@link Semaphore} which is configured as * "non-fair" to reduce its impact on request throughput. */ @@ -55,7 +55,7 @@ public BoundedHBaseClient(final HBaseClient client, final int maxRequests) { /** * Create a new instance with the given semaphore for the given underlying {@link HBaseClient} * implementation. - *
<p/>
+ * <p>
* Note: this is only really useful for sharing a {@link Semaphore} between two {@link * BoundedHBaseClient} instances, which only really makes sense for instances configured for * the same cluster, but with different client-side settings. Use with caution!! diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClient.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClient.java index 21ac6d5..ae3a645 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClient.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClient.java @@ -11,9 +11,9 @@ /** * Client for interacting with an HBase cluster. - *
<p/>
+ * <p>
* To create an instance, use {@link HBaseClientFactory}. - *
<p/>
+ * <p>
* All implementations are wrapper proxies around {@link org.hbase.async.HBaseClient} providing * additional functionality. * @@ -172,6 +172,7 @@ public interface HBaseClient { * Ensures that a specific table exists. * * @param table the table to check. + * @param family the family to check. * * @return a {@link Deferred} indicating the completion of the assertion. * @@ -186,6 +187,7 @@ public interface HBaseClient { * Ensures that a specific table exists. * * @param table the table to check. + * @param family the family to check. * * @return a {@link Deferred} indicating the completion of the assertion. * diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClientFactory.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClientFactory.java index 228bda5..6546e70 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClientFactory.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClientFactory.java @@ -13,10 +13,9 @@ /** * A factory for creating and managing {@link HBaseClient} instances. - *
<p/>
+ * <p>
* The resulting {@link HBaseClient} will have its lifecycle managed by an {@link Environment} and - * will have {@link com.codahale.metrics.health.HealthCheck}s installed for the {@code .META.} and - * {@code -ROOT-} tables. + * will have {@link com.codahale.metrics.health.HealthCheck}s installed for the {@code hbase:meta}. * * @see HBaseClient */ @@ -90,7 +89,7 @@ public void setFlushInterval(final Duration flushInterval) { /** * Returns the maximum size of the buffer for increment operations. - *
<p/>
+ * <p>
* Once this buffer is full, a flush is forced irrespective of the {@link #getFlushInterval() * flushInterval}. * @@ -105,7 +104,7 @@ public Size getIncrementBufferSize() { /** * Sets the maximum size of the buffer for increment operations. - *
<p/>
+ * <p>
* Once this buffer is full, a flush is forced irrespective of the {@link #getFlushInterval() * flushInterval}. * @@ -120,10 +119,10 @@ public void setIncrementBufferSize(final Size incrementBufferSize) { /** * Returns maximum number of concurrent asynchronous requests for the client. - *
<p/>
+ * <p>
* Useful for throttling high-throughput applications when HBase is the bottle-neck to prevent * the client running out of memory. - *
<p/>
+ * <p>
* With this is zero ("0"), no limit will be placed on the number of concurrent asynchronous * requests. * @@ -138,10 +137,10 @@ public int getMaxConcurrentRequests() { /** * Sets the maximum number of concurrent asynchronous requests for the client. - *
<p/>
+ * <p>
* Useful for throttling high-throughput applications when HBase is the bottle-neck to prevent * the client running out of memory. - *
<p/>
+ * <p>
* With this is zero ("0"), no limit will be placed on the number of concurrent asynchronous * requests. * @@ -228,9 +227,8 @@ public HBaseClient build(final Environment environment, final String name) { client.setFlushInterval(getFlushInterval()); client.setIncrementBufferSize(getIncrementBufferSize()); - // add healthchecks for META and ROOT tables - environment.healthChecks().register(name + "-meta", new HBaseHealthCheck(client, ".META.")); - environment.healthChecks().register(name + "-root", new HBaseHealthCheck(client, "-ROOT-")); + // add healthchecks for hbase:meta table + environment.healthChecks().register(name + "-meta", new HBaseHealthCheck(client, "hbase:meta")); // manage client environment.lifecycle().manage(new ManagedHBaseClient( @@ -241,11 +239,11 @@ public HBaseClient build(final Environment environment, final String name) { /** * Builds a new {@link HBaseClient} according to the given {@link HBaseClientFactory}. - *
<p/>
+ * <p>
* If instrumentation {@link #instrumented is enabled} in the * configuration, this will build an {@link InstrumentedHBaseClient} wrapping the given {@link * HBaseClient}. - *
<p/>
+ * <p>
* If instrumentation is not enabled, the given {@link HBaseClient} will be returned verbatim. * * @param client an underlying {@link HBaseClient} implementation. @@ -263,10 +261,10 @@ private HBaseClient instrument(final HBaseClient client, /** * Builds a new {@link HBaseClient} according to the given {@link HBaseClientFactory}. - *
<p/>
+ * <p>
* If the {@link #maxConcurrentRequests} is non-zero in the * configuration, this will build a {@link BoundedHBaseClient} that wraps the given client. - *
<p/>
+ * <p>
* If {@link #maxConcurrentRequests} is zero, the given {@link * HBaseClient} will be returned verbatim. * diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClient.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClient.java index 28cf888..0159e30 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClient.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClient.java @@ -16,9 +16,9 @@ /** * An {@link HBaseClient} that is instrumented with {@link Metric}s. - *
<p/>
+ * <p>
* For each asynchronous request method, a {@link Timer} tracks the time taken for the request. - *
<p/>
+ * <p>
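The per-request Timer mentioned above is typically started before the call and stopped from a callback on the returned Deferred, mirroring the TimerStoppingCallback utility elsewhere in this module. A hedged sketch; the metric name is invented:

    import com.codahale.metrics.MetricRegistry;
    import com.codahale.metrics.Timer;
    import com.stumbleupon.async.Callback;
    import com.stumbleupon.async.Deferred;

    class TimingSketch {
        private final Timer timer;

        TimingSketch(final MetricRegistry registry) {
            timer = registry.timer("gets"); // hypothetical metric name
        }

        <T> Deferred<T> timed(final Deferred<T> request) {
            final Timer.Context context = timer.time();
            return request.addBoth(new Callback<T, T>() {
                public T call(final T result) {
                    context.stop(); // records elapsed time for this request
                    return result;
                }
            });
        }
    }
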
* This implementation proxies all requests through an underlying {@link HBaseClient}; it merely * layers instrumentation on top of the underlying {@link HBaseClient}. * @@ -38,9 +38,9 @@ public class InstrumentedHBaseClient implements HBaseClient { /** * Creates a new {@link InstrumentedHBaseClient} for the given underlying client. - *
<p/>
+ * <p>
* Instrumentation will be registered with the given {@link MetricRegistry}. - *
<p/>
+ * <p>
* A new {@link HBaseInstrumentation} container will be created for this {@link HBaseClient} * with the given {@link MetricRegistry}. * diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/ManagedHBaseClient.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/ManagedHBaseClient.java index 672c632..e1583e2 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/ManagedHBaseClient.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/ManagedHBaseClient.java @@ -29,11 +29,11 @@ public ManagedHBaseClient(final HBaseClient client, final Duration connectionTim * To force the connection, we look for the prescence of the .META. table. * * @throws com.stumbleupon.async.TimeoutException if there is a problem connecting to HBase. - * @throws org.hbase.async.TableNotFoundException if the .META. table can't be found. - * @throws Exception if there is a problem verifying the .META. table exists. + * @throws org.hbase.async.TableNotFoundException if the hbase:meta table can't be found. + * @throws Exception if there is a problem verifying the hbase:meta table exists. */ public void start() throws Exception { - client.ensureTableExists(".META.").joinUninterruptibly(connectionTimeout.toMilliseconds()); + client.ensureTableExists("hbase:meta").joinUninterruptibly(connectionTimeout.toMilliseconds()); } /** diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/metrics/HBaseInstrumentation.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/metrics/HBaseInstrumentation.java index 2249380..3856293 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/metrics/HBaseInstrumentation.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/metrics/HBaseInstrumentation.java @@ -30,7 +30,7 @@ public class HBaseInstrumentation { /** * Initialises instrumentation for the given {@link HBaseClient} using the given {@link - * MetricsRegistry}. + * com.codahale.metrics.MetricRegistry}. * * @param client the client to create metrics for. * @param registry the registry to register the metrics with. diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/BoundedRowScanner.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/BoundedRowScanner.java index f200266..1347330 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/BoundedRowScanner.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/BoundedRowScanner.java @@ -11,7 +11,7 @@ /** * A Scanner that constraints concurrent requests with a {@link Semaphore}. - *
<p/>
+ * <p>
* To obtain an instance of a {@link RowScanner}, call {@link BoundedHBaseClient#scan(byte[])}. */ public class BoundedRowScanner implements RowScanner { diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/InstrumentedRowScanner.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/InstrumentedRowScanner.java index 13341e9..760bc58 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/InstrumentedRowScanner.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/InstrumentedRowScanner.java @@ -14,7 +14,7 @@ /** * A {@link RowScanner} that is instrumented with {@link Metric}s. - *
<p/>
+ * <p>
* To obtain an instance of a {@link RowScanner}, call {@link InstrumentedHBaseClient#scan(byte[])}. */ public class InstrumentedRowScanner implements RowScanner { diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScanner.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScanner.java index 585e030..7752e96 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScanner.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScanner.java @@ -8,10 +8,10 @@ /** * Client for scanning over a selection of rows. - *
<p/>
+ * <p>
* To obtain an instance of a {@link RowScanner}, call {@link * com.datasift.dropwizard.hbase.HBaseClient#scan(byte[])}. - *
<p/>
+ * <p>
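For orientation, this is how the underlying asynchbase Scanner mentioned in the next sentence is usually driven; a RowScanner wrapper mirrors the same shape. Blocking joins keep the sketch short — real callers would chain callbacks instead:

    import java.util.ArrayList;
    import org.hbase.async.KeyValue;
    import org.hbase.async.Scanner;

    class ScanSketch {
        static long countRows(final Scanner scanner) throws Exception {
            long rows = 0;
            ArrayList<ArrayList<KeyValue>> batch;
            // nextRows() yields the next batch of rows, or null once exhausted
            while ((batch = scanner.nextRows().joinUninterruptibly()) != null) {
                rows += batch.size();
            }
            scanner.close(); // close() is itself asynchronous; result ignored for brevity
            return rows;
        }
    }
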
* All implementations are wrapper proxies around {@link org.hbase.async.Scanner} providing * additional functionality. */ diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScannerProxy.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScannerProxy.java index 258cd62..2668e1d 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScannerProxy.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScannerProxy.java @@ -9,10 +9,10 @@ /** * Client for scanning over a selection of rows. - *
<p/>
+ * <p>
* To obtain an instance of a {@link RowScanner}, call {@link * com.datasift.dropwizard.hbase.HBaseClient#scan(byte[])}. - *
<p/>
+ * <p>
* This implementation is a proxy for a {@link org.hbase.async.Scanner}. */ public class RowScannerProxy implements RowScanner { diff --git a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/util/TimerStoppingCallback.java b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/util/TimerStoppingCallback.java index 8714957..7d4aff8 100644 --- a/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/util/TimerStoppingCallback.java +++ b/dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/util/TimerStoppingCallback.java @@ -4,19 +4,19 @@ import com.codahale.metrics.Timer; /** - * A {@link Callback} for stopping a {@link TimerContext} on completion. + * A {@link com.stumbleupon.async.Callback} for stopping a {@link com.codahale.metrics.Timer.Context} on completion. */ public class TimerStoppingCallback implements Callback { /** - * The context of the active {@link com.yammer.metrics.core.Timer} to stop. + * The context of the active {@link com.codahale.metrics.Timer} to stop. */ private final Timer.Context timer; /** * Creates a new {@link Callback} that stops the given active timer on completion. * - * @param timer the active {@link com.yammer.metrics.core.Timer} to stop on completion of the + * @param timer the active {@link com.codahale.metrics.Timer} to stop on completion of the * {@link Callback}. */ public TimerStoppingCallback(final Timer.Context timer) { @@ -24,14 +24,14 @@ public TimerStoppingCallback(final Timer.Context timer) { } /** - * Stops the registered {@link com.yammer.metrics.core.Timer} and proxies any argument through + * Stops the registered {@link com.codahale.metrics.Timer} and proxies any argument through * verbatim. * * @param arg the argument (if any) to pass-through. * * @return the argument (if any), proxied verbatim. * - * @throws Exception if an error occurs stopping the {@link com.yammer.metrics.core.Timer}. + * @throws Exception if an error occurs stopping the {@link com.codahale.metrics.Timer}. */ public T call(final T arg) throws Exception { timer.stop(); diff --git a/dropwizard-extra-hbase/src/test/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClientTest.java b/dropwizard-extra-hbase/src/test/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClientTest.java index 377c440..37f0357 100644 --- a/dropwizard-extra-hbase/src/test/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClientTest.java +++ b/dropwizard-extra-hbase/src/test/java/com/datasift/dropwizard/hbase/InstrumentedHBaseClientTest.java @@ -20,7 +20,7 @@ /** * Tests {@link InstrumentedHBaseClient}. - *
<p/>
+ * <p>
* Each method is tested first, that it proxies its implementation to the underlying {@link * HBaseClient}, and then that the method is timed as expected. */ diff --git a/dropwizard-extra-kafka/pom.xml b/dropwizard-extra-kafka/pom.xml index 5a54271..87fe9c3 100644 --- a/dropwizard-extra-kafka/pom.xml +++ b/dropwizard-extra-kafka/pom.xml @@ -5,7 +5,7 @@ com.datasift.dropwizard dropwizard-extra - 0.7.1-2-SNAPSHOT + 0.9.1-1-SNAPSHOT ../pom.xml @@ -18,10 +18,10 @@ - 0.8.1.1 + 0.8.2.2 2.10 - + io.dropwizard diff --git a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaConsumerFactory.java b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaConsumerFactory.java index 55f25ae..7bf45ea 100644 --- a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaConsumerFactory.java +++ b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaConsumerFactory.java @@ -28,10 +28,10 @@ /** * A factory for creating and managing {@link KafkaConsumer} instances. - *
<p/>
+ * <p>
* The {@link KafkaConsumer} implementation will be determined by the configuration used to create * it. - *
<p/>
+ * <p>
* The resultant {@link KafkaConsumer} will have its lifecycle managed by the {@link Environment} * and will have {@link com.codahale.metrics.health.HealthCheck}s installed to monitor its status. */ @@ -42,7 +42,7 @@ public class KafkaConsumerFactory extends KafkaClientFactory { /** * A description of the initial offset to consume from a partition when no committed offset * exists. - *
<p/>
+ * <p>
* <dl>
*   <dt>SMALLEST</dt>
*   <dd>Use the smallest (i.e. earliest) available offset. In effect,
*       consuming the entire log.</dd>
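For the offset behaviour documented above: in the Kafka 0.8.x high-level consumer this maps onto the auto.offset.reset property, where "smallest" replays from the earliest retained offset and "largest" starts at the tail. An illustrative raw-properties sketch with placeholder values — not this factory's API:

    import java.util.Properties;

    class OffsetSketch {
        static Properties consumerProperties(final boolean fromBeginning) {
            final Properties props = new Properties();
            props.put("zookeeper.connect", "localhost:2181"); // placeholder ensemble
            props.put("group.id", "example-group");           // placeholder group
            // SMALLEST == earliest available offset, i.e. consume the entire log
            props.put("auto.offset.reset", fromBeginning ? "smallest" : "largest");
            return props;
        }
    }
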
@@ -150,7 +150,7 @@ public void setGroup(final String group) { /** * Returns a mapping of the number of partitions to consume from each topic. - *
<p/>
+ * <p>
* Topics not referenced will not be consumed from. * * @return a Map of topics to the number of partitions to consume from them. @@ -162,7 +162,7 @@ public Map getPartitions() { /** * Sets a mapping of the number of partitions to consume from each topic. - *
<p/>
+ * <p>
* Topics not referenced will not be consumed from. * * @param partitions a Map of topics to the number of partitions to consume from them. @@ -175,7 +175,7 @@ public void setPartitions(final Map partitions) { /** * Returns the time the {@link KafkaConsumer} should wait to receive messages before timing out * the stream. - *
<p/>
+ * <p>
* When a {@link KafkaConsumer} times out a stream, a {@link * kafka.consumer.ConsumerTimeoutException} will be thrown by that streams' {@link * kafka.consumer.ConsumerIterator}. @@ -194,7 +194,7 @@ public Duration getTimeout() { /** * Sets the time the {@link KafkaConsumer} should wait to receive messages before timing out * the stream. - *
<p/>
+ * <p>
* When a {@link KafkaConsumer} times out a stream, a {@link * kafka.consumer.ConsumerTimeoutException} will be thrown by that streams' {@link * kafka.consumer.ConsumerIterator}. @@ -230,7 +230,7 @@ public void setReceiveBufferSize(final Size size) { /** * Returns the maximum size of a batch of messages to fetch in a single request. - *
<p/>
+ * <p>
* This dictates the maximum size of a message that may be received by the {@link * KafkaConsumer}. Messages larger than this size will cause a {@link * kafka.common.InvalidMessageSizeException} to be thrown during iteration of the stream. @@ -246,7 +246,7 @@ public Size getFetchSize() { /** * Sets the maximum size of a batch of messages to fetch in a single request. - *
<p/>
+ * <p>
* This dictates the maximum size of a message that may be received by the {@link * KafkaConsumer}. Messages larger than this size will cause a {@link * kafka.common.InvalidMessageSizeException} to be thrown during iteration of the stream. @@ -262,7 +262,7 @@ public void setFetchSize(final Size size) { /** * Returns the cumulative delay before polling a broker again when no data is returned. - *
<p/>
+ * <p>
* When fetching data from a broker, if there is no new data, there will be a delay before * polling the broker again. This controls the duration of the delay by increasing it linearly, * on each poll attempt. @@ -276,7 +276,7 @@ public Duration getBackOffIncrement() { /** * Sets the cumulative delay before polling a broker again when no data is returned. - *
<p/>
+ * <p>
* When fetching data from a broker, if there is no new data, there will be a delay before * polling the broker again. This controls the duration of the delay by increasing it linearly, * on each poll attempt. @@ -290,12 +290,14 @@ public void setBackOffIncrement(final Duration increment) { /** * Returns the maximum number of chunks to queue in internal buffers. - *
<p/>
+ * <p>
* The consumer internally buffers fetched messages in a set of queues, which are used to * iterate the stream. This controls the size of these queues. - *
<p/>
+ * <p>
* Once a queue has been filled, it will block subsequent attempts to fill it until (some of) it * has been iterated. + * + * @return the maximum number of chunks to queue in internal buffers */ @JsonProperty public int getQueuedChunks() { @@ -304,12 +306,14 @@ public int getQueuedChunks() { /** * Sets the maximum number of chunks to queue in internal buffers. - *
<p/>
+ * <p>
* The consumer internally buffers fetched messages in a set of queues, which are used to * iterate the stream. This controls the size of these queues. - *
<p/>
+ * <p>
* Once a queue has been filled, it will block subsequent attempts to fill it until (some of) it * has been iterated. + * + * @param maxChunks the maximium number of chunks to queue */ @JsonProperty public void setQueuedChunks(final int maxChunks) { @@ -342,7 +346,7 @@ public void setAutoCommit(final boolean autoCommit) { } /** - * Sets the frequency to automatically commit previously consumed offsets, if enabled. + * Gets the frequency to automatically commit previously consumed offsets, if enabled. * * @return the frequency to automatically commit the previously consumed offsets, when enabled. * @@ -355,9 +359,9 @@ public Duration getAutoCommitInterval() { /** - * Returns the frequency to automatically commit previously consumed offsets, if enabled. - * - * @return the frequency to automatically commit the previously consumed offsets, when enabled. + * Sets the frequency to automatically commit previously consumed offsets, if enabled. + * + * @param autoCommitInterval the frequency with which to auto commit. * * @see #getAutoCommit */ @@ -492,7 +496,7 @@ public KafkaConsumerBuilder processWith(final StreamProcessor + *
<p>
* The decoder instance is used to decode {@link Message}s in the stream before being passed to * the processor. * @@ -546,10 +550,10 @@ public KafkaConsumer build(final Environment environment) { /** * Builds a {@link KafkaConsumer} instance from the given {@link ExecutorService} and name, * for the given {@link Environment}. - *
<p/>
+ * <p>
* The name is used to identify the returned {@link KafkaConsumer} instance, for example, as * the name of its {@link com.codahale.metrics.health.HealthCheck}s, thread pool, etc. - *
<p/>
+ * <p>
* This implementation creates a new {@link ExecutorService} with a fixed-size thread-pool, * configured for one thread per-partition the {@link KafkaConsumer} is being configured to * consume. @@ -578,7 +582,7 @@ public KafkaConsumer build(final Environment environment, final String name) { /** * Builds a {@link KafkaConsumer} instance from the given {@link ExecutorService} and name, * for the given {@link Environment}. - *
<p/>
+ * <p>
* The name is used to identify the returned {@link KafkaConsumer} instance, for example, as * the name of its {@link com.codahale.metrics.health.HealthCheck}s, etc. * @@ -607,7 +611,7 @@ public KafkaConsumer build(final Environment environment, /** * Builds a {@link SynchronousConsumer} instance with this builders' configuration using the * given {@link ExecutorService}. - *
<p/>
+ * <p>
* If possible, it's always preferable to use one of the overloads that take an {@link * Environment} directly. This overload exists for situations where you don't have access to * an {@link Environment} (e.g. some Commands or unit tests). diff --git a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaProducerFactory.java b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaProducerFactory.java index 1ed1c1e..21bc19f 100644 --- a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaProducerFactory.java +++ b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaProducerFactory.java @@ -26,10 +26,10 @@ /** * Configuration for the Kafka producer. - *
<p/>
+ * <p>
* By default, the producer will be synchronous, blocking the calling thread until the message has * been sent. - *
<p/>
+ * <p>
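For reference, the old Scala producer wrapped here switches between the two modes via producer.type; async mode buffers and batches sends on a background thread. A hedged sketch of the equivalent raw configuration, using Kafka 0.8.x property names and illustrative values:

    import java.util.Properties;

    class ProducerModeSketch {
        static Properties asyncProducerProperties() {
            final Properties props = new Properties();
            props.put("metadata.broker.list", "localhost:9092"); // placeholder broker
            props.put("producer.type", "async");        // "sync" blocks the caller per send
            props.put("queue.buffering.max.ms", "500");  // flush buffered messages every 500ms...
            props.put("batch.num.messages", "200");      // ...or once 200 messages accumulate
            return props;
        }
    }
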
* To use an asynchronous producer, set {@link KafkaProducerFactory#async} with the desired * properties. */ @@ -250,7 +250,7 @@ public Optional getClientIdSuffix() { public void setClientIdSuffix(final Optional clientIdSuffix) { this.clientIdSuffix = clientIdSuffix; } - + public KafkaProducer build(final Class> messageEncoder, final Environment environment, final String name) { diff --git a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java index 34218af..f586491 100644 --- a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java +++ b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java @@ -6,7 +6,7 @@ /** * Processes messages of type {@code T} from a Kafka message stream. - *
<p/>
+ * <p>
* This {@link StreamProcessor} is instrumented with {@link Metric}s; specifically, a {@link Timer} * that tracks the time taken to process each message in the stream. * diff --git a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java index e79f9c7..c75c262 100644 --- a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java +++ b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java @@ -4,10 +4,10 @@ /** * Processes an {@link Iterable} of messages of type {@code T}. - *
<p/>
+ * <p>
* If you wish to process each message individually and iteratively, it's advised that you instead * use a {@link MessageProcessor}, as it provides a higher-level of abstraction. - *
<p/>
+ * <p>
* Note: since consumers may use multiple threads, it is important that implementations are * thread-safe. */
diff --git a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java
index 67cb32c..b65bcb5 100644
--- a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java
+++ b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java
@@ -71,11 +71,17 @@ public void run() {
* Creates a {@link SynchronousConsumer} to process a stream.
*
* @param connector the {@link ConsumerConnector} of the underlying consumer.
- * @param partitions a mapping of the topic -> partitions to consume.
+ * @param partitions a mapping of each topic to the partitions to consume.
* @param keyDecoder a {@link Decoder} for decoding the key of each message before being processed.
* @param valueDecoder a {@link Decoder} for decoding each message before being processed.
* @param processor a {@link StreamProcessor} for processing messages.
* @param executor the {@link ExecutorService} to process the stream with.
+ * @param initialRecoveryDelay the initial recovery delay
+ * @param maxRecoveryDelay the maximum recovery delay
+ * @param retryResetDelay the delay after which the recovery attempt count is reset
+ * @param maxRecoveryAttempts the number of times to attempt recovery
+ * @param shutdownOnFatal whether to shut down on a fatal error
+ * @param startDelay the amount of time to delay at start
*/
public SynchronousConsumer(final ConsumerConnector connector, final Map partitions, @@ -120,11 +126,11 @@ public void serverStarted(final Server server) { /** * Starts this {@link SynchronousConsumer} immediately.
- * <p/>
+ * <p>
* The consumer will immediately begin consuming from the configured topics using the configured * {@link Decoder} to decode messages and {@link StreamProcessor} to process the decoded * messages. - *
<p/>
+ * <p>
* Each partition will be consumed using a separate thread. * * @throws Exception if an error occurs starting the consumer @@ -152,7 +158,7 @@ public void start() throws Exception { /** * Stops this {@link SynchronousConsumer} immediately. * - * @throws Exception + * @throws Exception if an error occurs on stop */ @Override public void stop() throws Exception { @@ -201,10 +207,10 @@ public StreamProcessorRunnable(final String topic, final KafkaStream strea /** * Process the stream using the configured {@link StreamProcessor}. - *
<p/>
+ * <p>
* If an {@link Exception} is thrown during processing, if it is deemed recoverable, * the stream will continue to be consumed. - *
<p/>
+ * <p>
* Unrecoverable {@link Exception}s will cause the consumer to shut down completely. */ @Override diff --git a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java index c636599..e302667 100644 --- a/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java +++ b/dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java @@ -9,10 +9,10 @@ /** * A utility for parsing {@link CompressionCodec}s from a {@link * io.dropwizard.Configuration}. - *
<p/>
+ * <p>
* To create {@link Compression} instances, use {@link Compression#parse(String)} to parse an * instance from a {@link String}. - *
<p/>
+ * <p>
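A small usage sketch of the parse factory just described; the codec name below is illustrative — the accepted values are whatever CompressionCodec defines:

    import com.datasift.dropwizard.kafka.util.Compression;

    class CompressionSketch {
        // Typically fed from a configuration value, e.g. a "compression: snappy" entry.
        static Compression fromConfig(final String value) {
            return Compression.parse(value);
        }
    }
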
* This is provided to parse textual specifications of a {@link CompressionCodec}, for example in a * {@link io.dropwizard.Configuration}. */ @@ -22,9 +22,9 @@ public class Compression { /** * Creates a {@link Compression} instance for the given codec type. - *
<p/>
+ * <p>
* The valid codec values are defined by {@link CompressionCodec}. - *
<p/>
+ * <p>
* To create {@link Compression} instances, use the {@link Compression#parse(String)} factory * method to parse an instance from a {@link String}. * diff --git a/dropwizard-extra-kafka7/pom.xml b/dropwizard-extra-kafka7/pom.xml index 1c5f584..7e1e53c 100644 --- a/dropwizard-extra-kafka7/pom.xml +++ b/dropwizard-extra-kafka7/pom.xml @@ -5,7 +5,7 @@ com.datasift.dropwizard dropwizard-extra - 0.7.1-2-SNAPSHOT + 0.9.1-1-SNAPSHOT ../pom.xml diff --git a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/KafkaConsumerFactory.java b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/KafkaConsumerFactory.java index 2dfa365..d6172e1 100644 --- a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/KafkaConsumerFactory.java +++ b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/KafkaConsumerFactory.java @@ -27,10 +27,10 @@ /** * A factory for creating and managing {@link KafkaConsumer} instances. - *
<p/>
+ * <p>
* The {@link KafkaConsumer} implementation will be determined by the configuration used to create * it. - *
<p/>
+ * <p>
* The resultant {@link KafkaConsumer} will have its lifecycle managed by the {@link Environment} * and will have {@link com.codahale.metrics.health.HealthCheck}s installed to monitor its status. */ @@ -41,7 +41,7 @@ public class KafkaConsumerFactory extends KafkaClientFactory { /** * A description of the initial offset to consume from a partition when no committed offset * exists. - *
<p/>
+ * <p>
* <dl>
*   <dt>SMALLEST</dt>
*   <dd>Use the smallest (i.e. earliest) available offset. In effect,
*       consuming the entire log.</dd>
@@ -149,7 +149,7 @@ public void setGroup(final String group) { /** * Returns a mapping of the number of partitions to consume from each topic. - *
<p/>
+ * <p>
* Topics not referenced will not be consumed from. * * @return a Map of topics to the number of partitions to consume from them. @@ -161,7 +161,7 @@ public Map getPartitions() { /** * Sets a mapping of the number of partitions to consume from each topic. - *
<p/>
+ * <p>
* Topics not referenced will not be consumed from. * * @param partitions a Map of topics to the number of partitions to consume from them. @@ -174,7 +174,7 @@ public void setPartitions(final Map partitions) { /** * Returns the time the {@link KafkaConsumer} should wait to receive messages before timing out * the stream. - *
<p/>
+ * <p>
* When a {@link KafkaConsumer} times out a stream, a {@link * kafka.consumer.ConsumerTimeoutException} will be thrown by that streams' {@link * kafka.consumer.ConsumerIterator}. @@ -193,7 +193,7 @@ public Duration getTimeout() { /** * Sets the time the {@link KafkaConsumer} should wait to receive messages before timing out * the stream. - *
<p/>
+ * <p>
* When a {@link KafkaConsumer} times out a stream, a {@link * kafka.consumer.ConsumerTimeoutException} will be thrown by that streams' {@link * kafka.consumer.ConsumerIterator}. @@ -229,7 +229,7 @@ public void setReceiveBufferSize(final Size size) { /** * Returns the maximum size of a batch of messages to fetch in a single request. - *
<p/>
+ * <p>
* This dictates the maximum size of a message that may be received by the {@link * KafkaConsumer}. Messages larger than this size will cause a {@link * kafka.common.InvalidMessageSizeException} to be thrown during iteration of the stream. @@ -245,7 +245,7 @@ public Size getFetchSize() { /** * Sets the maximum size of a batch of messages to fetch in a single request. - *
<p/>
+ * <p>
* This dictates the maximum size of a message that may be received by the {@link * KafkaConsumer}. Messages larger than this size will cause a {@link * kafka.common.InvalidMessageSizeException} to be thrown during iteration of the stream. @@ -261,7 +261,7 @@ public void setFetchSize(final Size size) { /** * Returns the cumulative delay before polling a broker again when no data is returned. - *
<p/>
+ * <p>
* When fetching data from a broker, if there is no new data, there will be a delay before * polling the broker again. This controls the duration of the delay by increasing it linearly, * on each poll attempt. @@ -275,7 +275,7 @@ public Duration getBackOffIncrement() { /** * Sets the cumulative delay before polling a broker again when no data is returned. - *
<p/>
+ * <p>
* When fetching data from a broker, if there is no new data, there will be a delay before * polling the broker again. This controls the duration of the delay by increasing it linearly, * on each poll attempt. @@ -289,10 +289,10 @@ public void setBackOffIncrement(final Duration increment) { /** * Returns the maximum number of chunks to queue in internal buffers. - *
<p/>
+ * <p>
* The consumer internally buffers fetched messages in a set of queues, which are used to * iterate the stream. This controls the size of these queues. - *
<p/>
+ * <p>
* Once a queue has been filled, it will block subsequent attempts to fill it until (some of) it * has been iterated. */ @@ -303,10 +303,10 @@ public int getQueuedChunks() { /** * Sets the maximum number of chunks to queue in internal buffers. - *
<p/>
+ * <p>
* The consumer internally buffers fetched messages in a set of queues, which are used to * iterate the stream. This controls the size of these queues. - *
<p/>
+ * <p>
* Once a queue has been filled, it will block subsequent attempts to fill it until (some of) it * has been iterated. */ @@ -491,7 +491,7 @@ public KafkaConsumerBuilder processWith(final StreamProcessor /** * Prepares a {@link KafkaConsumerBuilder} for a given {@link Decoder} and {@link * StreamProcessor}. - *
<p/>
+ * <p>
* The decoder instance is used to decode {@link Message}s in the stream before being passed to * the processor. * @@ -536,10 +536,10 @@ public KafkaConsumer build(final Environment environment) { /** * Builds a {@link KafkaConsumer} instance from the given {@link ExecutorService} and name, * for the given {@link Environment}. - *
<p/>
+ * <p>
* The name is used to identify the returned {@link KafkaConsumer} instance, for example, as * the name of its {@link com.codahale.metrics.health.HealthCheck}s, thread pool, etc. - *
<p/>
+ * <p>
* This implementation creates a new {@link ExecutorService} with a fixed-size thread-pool, * configured for one thread per-partition the {@link KafkaConsumer} is being configured to * consume. @@ -568,7 +568,7 @@ public KafkaConsumer build(final Environment environment, final String name) { /** * Builds a {@link KafkaConsumer} instance from the given {@link ExecutorService} and name, * for the given {@link Environment}. - *
<p/>
+ * <p>
* The name is used to identify the returned {@link KafkaConsumer} instance, for example, as * the name of its {@link com.codahale.metrics.health.HealthCheck}s, etc. * @@ -597,7 +597,7 @@ public KafkaConsumer build(final Environment environment, /** * Builds a {@link SynchronousConsumer} instance with this builders' configuration using the * given {@link ExecutorService}. - *
<p/>
+ * <p>
* If possible, it's always preferable to use one of the overloads that take an {@link * Environment} directly. This overload exists for situations where you don't have access to * an {@link Environment} (e.g. some Commands or unit tests). diff --git a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java index 9b48b71..fbb60ed 100644 --- a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java +++ b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java @@ -6,7 +6,7 @@ /** * Processes messages of type {@code T} from a Kafka message stream. - *
<p/>
+ * <p>
* This {@link StreamProcessor} is instrumented with {@link Metric}s; specifically, a {@link Timer} * that tracks the time taken to process each message in the stream. * diff --git a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java index 06698cd..b618eb2 100644 --- a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java +++ b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java @@ -4,10 +4,10 @@ /** * Processes an {@link Iterable} of messages of type {@code T}. - *
<p/>
+ * <p>
* If you wish to process each message individually and iteratively, it's advised that you instead * use a {@link MessageProcessor}, as it provides a higher-level of abstraction. - *
<p/>
+ * <p>
* Note: since consumers may use multiple threads, it is important that implementations are * thread-safe. */ diff --git a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java index 45b9e59..c4081b7 100644 --- a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java +++ b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java @@ -116,11 +116,11 @@ public void serverStarted(final Server server) { /** * Starts this {@link SynchronousConsumer} immediately. - *
<p/>
+ * <p>
* The consumer will immediately begin consuming from the configured topics using the configured * {@link Decoder} to decode messages and {@link StreamProcessor} to process the decoded * messages. - *
<p/>
+ * <p>
* Each partition will be consumed using a separate thread. * * @throws Exception if an error occurs starting the consumer @@ -197,10 +197,10 @@ public StreamProcessorRunnable(final String topic, final KafkaStream stream) /** * Process the stream using the configured {@link StreamProcessor}. - *
<p/>
+ * <p>
* If an {@link Exception} is thrown during processing, if it is deemed recoverable, * the stream will continue to be consumed. - *
<p/>
+ * <p>
* Unrecoverable {@link Exception}s will cause the consumer to shut down completely. */ @Override diff --git a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java index c636599..e302667 100644 --- a/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java +++ b/dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java @@ -9,10 +9,10 @@ /** * A utility for parsing {@link CompressionCodec}s from a {@link * io.dropwizard.Configuration}. - *
<p/>
+ * <p>
* To create {@link Compression} instances, use {@link Compression#parse(String)} to parse an * instance from a {@link String}. - *
<p/>
+ * <p>
* This is provided to parse textual specifications of a {@link CompressionCodec}, for example in a * {@link io.dropwizard.Configuration}. */ @@ -22,9 +22,9 @@ public class Compression { /** * Creates a {@link Compression} instance for the given codec type. - *
<p/>
+ * <p>
* The valid codec values are defined by {@link CompressionCodec}. - *
<p/>
+ * <p>
* To create {@link Compression} instances, use the {@link Compression#parse(String)} factory * method to parse an instance from a {@link String}. * diff --git a/dropwizard-extra-util/pom.xml b/dropwizard-extra-util/pom.xml index 1443075..4753f87 100644 --- a/dropwizard-extra-util/pom.xml +++ b/dropwizard-extra-util/pom.xml @@ -5,7 +5,7 @@ com.datasift.dropwizard dropwizard-extra - 0.7.1-2-SNAPSHOT + 0.9.1-1-SNAPSHOT ../pom.xml diff --git a/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/health/SocketHealthCheck.java b/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/health/SocketHealthCheck.java index 066cacb..66ec2ce 100644 --- a/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/health/SocketHealthCheck.java +++ b/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/health/SocketHealthCheck.java @@ -7,7 +7,7 @@ /** * A base {@link HealthCheck} for remote socket servers. - *
<p/>
+ * <p>
* Use this as a basis for {@link HealthCheck}s for remote services, such as databases or * web-services. */ @@ -48,7 +48,7 @@ public SocketHealthCheck(final String hostname, final int port) { /** * Generates a String representation of the remote socket being checked. - *
<p/>
+ * <p>
* This will be the socket address formatted as: hostname:port * * @return the String representation of the remote socket being checked. @@ -91,7 +91,7 @@ protected Socket createSocket(final String hostname, final int port) throws IOEx /** * Perform a check of a {@link Socket}. - *
<p/>
+ * <p>
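Since check(Socket) is the single extension point — and, per the note that follows, the socket arrives already connected — a subclass only has to exercise the connection and report a Result. A sketch with an invented ping protocol:

    import java.io.IOException;
    import java.net.Socket;

    public class EchoHealthCheck extends SocketHealthCheck {

        public EchoHealthCheck(final String hostname, final int port) {
            super(hostname, port);
        }

        @Override
        protected Result check(final Socket socket) {
            try {
                socket.getOutputStream().write("ping\n".getBytes());
                return socket.getInputStream().read() == -1
                        ? Result.unhealthy("connection closed before responding")
                        : Result.healthy();
            } catch (final IOException e) {
                return Result.unhealthy(e);
            }
        }
    }
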
* Implementations can assume that the {@link Socket} is already connected. * * @param socket the {@link Socket} to check the health of @@ -99,8 +99,6 @@ protected Socket createSocket(final String hostname, final int port) throws IOEx * @return if the component is healthy, a healthy {@link Result}; otherwise, an unhealthy {@link * Result} with a description of the error or exception * - * @throws Exception if there is an unhandled error during the health check; this will result in - * a failed health check */ protected abstract Result check(Socket socket); } diff --git a/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Classes.java b/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Classes.java index 4495433..00da3a1 100644 --- a/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Classes.java +++ b/dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Classes.java @@ -12,7 +12,7 @@ public class Classes { /** * Creates a new instance of the given {@link Class}, using the given arguments. - *
<p/>
+ * <p>
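The reflective construction described here amounts to resolving a constructor from the runtime classes of the arguments and invoking it. A simplified sketch — unlike a production version it does no primitive-widening, null handling, or visibility overriding:

    import java.lang.reflect.Constructor;

    class ReflectionSketch {
        static <T> T newInstance(final Class<T> clazz, final Object... args) throws Exception {
            final Class<?>[] types = new Class<?>[args.length];
            for (int i = 0; i < args.length; i++) {
                types[i] = args[i].getClass(); // exact-type lookup only
            }
            final Constructor<T> constructor = clazz.getConstructor(types);
            return constructor.newInstance(args);
        }
    }
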
* A new instance object of the given {@link Class} is created, using reflection, * providing the given arguments to the constructor. * @@ -45,8 +45,8 @@ public static T newInstance(final Class clazz, final Object... args) /** * Creates a new instance of the same {@link Class} as the given template, using the * given constructor arguments. - *
<p/>
- * Given an object of type T, a new instance of {@link Class} will be created, + *
<p>
+ * Given an object of type T, a new instance of {@link Class} will be created, * passing the given args to the constructor. * * @param template an object that provides the {@link Class} to instantiate. @@ -80,10 +80,10 @@ public static T newInstanceFrom(final T template, final Object... args) /** * Creates a new instance of the given {@link Class}, using the given arguments, ignoring * visibility. - *
<p/>
+ * <p>
* A new instance object of the given {@link Class} is created, using reflection, * providing the given arguments to the constructor. - *
<p/>
+ * <p>
* The visibility of the {@link Constructor} defined by the arguments is ignored and a new * instance created irrespective of the defined visibility. This is potentially dangerous, * as the API likely makes no guarantee as to the behaviour when instantiating from a non-public @@ -124,10 +124,10 @@ public static T unsafeNewInstance(final Class clazz, final Object... args /** * Creates a new instance of the same {@link Class} as the given template, using the * given constructor arguments and ignoring visibility. - *
<p/>
- * Given an object of type T, a new instance of {@link Class} will be created, + *
<p>
+ * Given an object of type T, a new instance of {@link Class} will be created, * passing the given args to the constructor. - *
<p/>
+ * <p>
* The visibility of the {@link Constructor} defined by the arguments is ignored and a new * instance created irrespective of the defined visibility. This is potentially dangerous, * as the API likely makes no guarantee as to the behaviour when instantiating from a non-public @@ -161,11 +161,11 @@ public static T unsafeNewInstanceFrom(final T template, final Object... args /** * Gets the {@link Class} for multiple objects. - *
<p/>
+ * <p>
* The resulting array of {@link Class} objects that are ordered in parallel with the argument * list that produced it. This is especially useful for getting a {@link Constructor} for a * given set of arguments: - *
<p/>
+ * <p>
* * clazz.getConstructor(Classes.of("abc", 123)); * @@ -184,16 +184,16 @@ public static Class[] of(final Object... arguments) { /** * Ensures a variable argument list has been properly passed. - *
<p/>
+ * <p>
* Sometimes, you want to pass a single array-typed argument to a method that accepts * variable arguments. In these situations, that array will be unwrapped in to a list of * multiple arguments, instead of a single argument that is an array. - *
<p/>
+ * <p>
* Example: * * Classes.of(TableNotFoundException.class, tableName.getBytes()); * - *
<p/>
+ * <p>
* Resolving will differentiate a variable argument list from a single argument of the * following types: *
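The variable-argument unwrapping described above is easy to demonstrate: only an Object[] is spread into separate arguments, so a caller passing a single array must cast or wrap it to keep it as one argument. A hypothetical demonstration:

    class VarargsSketch {
        static int count(final Object... arguments) {
            return arguments.length;
        }

        public static void main(final String[] args) {
            System.out.println(count(new Object[] { "a", "b" }));          // 2: unwrapped
            System.out.println(count((Object) new Object[] { "a", "b" })); // 1: cast keeps it single
            System.out.println(count("abc".getBytes()));                   // 1: byte[] is not Object[]
        }
    }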